Example #1
def delete_internal_leaves_jsome2jall(jall_name, jsome_name):
    # disconnect vertices of g that are disconnected in h
    try:
        g, attributes = graph_analysis.input_graph(
            jall_name, ["history", "vertex_offsets"])
        print("Successfully read in the %s\n" % jall_name)
    except:
        print("Error reading in the %s\n" % jall_name)

    h = graph_analysis.input_graph(jsome_name)

    if len(g.vertices) != len(h.vertices):
        print "ERROR: delete_internal_leaves_jsome2jall, the number of vertices of g and h are not equal!!\nAborted!"
        exit(0)
    for i in xrange(len(g.vertices)):
        if len(h.vertices[i].edges) == 0:
            g.disconnect_vertex(i)

    out_name = jall_name[:-3] + "_delleave.db"
    history = '\n>>> %s: delete_internal_leaves on join_all using the join_some result' % (
        ctime(time()))  ##argv = ['code.py', 'input.db', 'output']
    if attributes.has_key("history"):
        history = attributes["history"] + "\n" + history
        del attributes['history']

    graph_analysis.output_graph(out_name, g, history, attributes)
    print("Succefully wrote the %s\n" % out_name)

    return out_name
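# A minimal usage sketch (the .db file names are hypothetical): given the graph
# from a join_all run and the matching graph from a join_some run, disconnect
# the leaves that join_some left unconnected and return the new file name, e.g.
#
#   delleave_db = delete_internal_leaves_jsome2jall("mouse1_join_all.db",
#                                                   "mouse1_join_some.db")
#   # writes and returns "mouse1_join_all_delleave.db"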
              "The --clobber option is needed to overwrite an existing file."

    ##1. borrow from delete_internal_leaves to find segments that are inside

    ##2. borrow from smooth_graph to decide which one is parent and which is daughter branch that needs to be deleted

    ##3. borrow from adjust_junction to find the nearest connection point with preserving the topology

    history = '>>> %s: %s' % (time.ctime(time.time()), string.join(argv))

    ls = []
    if options.labels:
        ls = options.labels.split(',')
        ls = [int(l) for l in ls]

    g, attributes = graph_analysis.input_graph(input_file,
                                               ["history", "vertex_offsets"])

    occlusions, encompassing_e = detect_internal_segments(
        g, ls, options.scale, options.offset)

    for e in occlusions.keys():
        if e not in encompassing_e:  #this way only edges that have no other edge inside them will be deleted
            g.remove_edge(e)

    #NOTE: what we should actually do is check whether all of the vertices of an edge lie inside another edge; if so, remove that edge.
    # If not, disconnect only the vertices that are inside, then reconnect the remaining vertices to the encompassing edge in encompassing_e.
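    # A minimal sketch of the NOTE above, assuming a caller-supplied predicate
    # is_inside(g, v, enc_e) that says whether vertex v lies inside the
    # encompassing edge enc_e (that predicate is hypothetical and not part of
    # the graph_analysis API shown here):
    def prune_occluded_edge(g, e, enc_e, is_inside):
        # e is a (v0, v1) vertex-index pair, as elsewhere in this code
        inside = [v for v in e if is_inside(g, v, enc_e)]
        outside = [v for v in e if not is_inside(g, v, enc_e)]
        if not outside:
            # every vertex of e is inside enc_e => remove the whole edge
            g.remove_edge(e)
            return []
        # partial occlusion: only disconnect the inside vertices; the caller
        # should reconnect the returned outside vertices to enc_e
        for v in inside:
            g.disconnect_vertex(v)
        return outside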

    if attributes.has_key("history"):
        history = attributes["history"] + "\n" + history
        del attributes['history']
        output_file = output_file.replace("_groundtruthlabel", "")
        output_file = output_file.replace("_cyl", "")
    else:
        print len(args), ": ", args
        parser.error("incorrect number of arguments")

    if options.remap:
        output_file = output_file[:-4] + "_remap.mnc"
    if not options.mask:
        output_file = output_file[:-4] + "_notmasked.mnc"
    print output_file
    if not options.clobber and os.path.exists(output_file):
        raise SystemExit, \
            "The --clobber option is needed to overwrite an existing file."

    g = graph_analysis.input_graph(input_file)
    #g= graph_analysis.input_graph("/micehome/sghanavati/Documents/data/journal1data_c57_feb13/LOO_final_revised/autolabel_c57_1_4_S_cyl.db")
    #g= graph_analysis.input_graph("./c57_1_9Nov12/c57_1_graph_add_coverage_delleave_radi1.06_smooth2_simplified_rmisolate_features_groundtruthlabel_cyl_revised.db")

    mr_atlas_file = "/projects/souris/sghanavati/data/c57_male_female_mri_atlas/c57_brain_segmentation_mnc2_merged_reg2vascular.mnc"
    ##mr_atlas_file = "/projects/souris/sghanavati/data/c57_male_female_mri_atlas/c57_brain_segmentation_mnc2_merged_reg2vascular_downsample_merge.mnc"

    ##labels = [2,43,102,143,122,8,5,108,105,35,135,13,236,111,191,190,91,90,200,9,7,10,196,96,198,68,227,168,169,46,12,49,45,14,15,17,18,3,4]
    #arteries :    8:108,5:105,135:35,111:35,236:35,191:91,190:90,68:168,227:169
    if not options.labels:
        labels = [
            122, 8, 5, 108, 105, 35, 135, 13, 236, 111, 191, 190, 91, 90, 68,
            227, 168, 169, 46, 12, 49, 45, 14, 15, 17, 18, 3, 4
        ]
        if options.veins:
            labels = [
    if not options.clobber and os.path.exists(output_file):
        raise SystemExit, \
            "The --clobber option is needed to overwrite an existing file."

    radius_threshold = options.radius_threshold
    length_threshold = options.length_threshold
    intermed_threshold = options.intermed_threshold

    if not options.clobber and os.path.exists(output_file):
        raise SystemExit, \
            "The --clobber option is needed to overwrite an existing file."

    try:
        g, attributes = graph_analysis.input_graph(
            input_file, ["history", "vertex_offsets"
                         ])  # open graph data and copy contents to memory
        #g = graph_analysis.input_graph("tree_mask_blur_delleaves.db")
        print("Succefully read in the input.db\n")
    except:
        print("Error reading in the input.db\n")

    h = copy.deepcopy(g)
    # simplify h by reducing to vessel segments
    graph_analysis.simplify_graph_retaining_intermediaries(h)
    # estimate lengths and average diameters of each segment
    graph_analysis.estimate_edge_diameters(h)
    graph_analysis.estimate_edge_lengths(h)

    remove_edge_num = 0
    for e in h.edge_list():
        ltriples = options.landmark3.split(',')
        labeltriples = []
        for pair in ltriples:
            labels = pair.split(':')
            labels = [int(l) for l in labels]
            labeltriples.append(set(labels))
        # flatten the triples to collect every label that appears in any triple
        all_l = [label for triple in labeltriples for label in triple]
        unique_triple_l = []
        for l in all_l:
            if l not in unique_triple_l:
                unique_triple_l.append(l)

    if len(args) == 2:
        g = graph_analysis.input_graph(input_file)
        landmarks = []
        according_l = []

        #for e in g.edge_list():
        #if len(vessel_analysis.find_neighbor_edges_asymmetrical(g,e,0)) not in [0,2]:
        #print e,":", vessel_analysis.find_neighbor_edges_asymmetrical(g,e,0)
        #if len(vessel_analysis.find_neighbor_edges_asymmetrical(g,e,1)) not in [0,2]:
        #print e,":", vessel_analysis.find_neighbor_edges_asymmetrical(g,e,1)

        if options.landmark:
            for e in g.edge_list():
                if g.edge_property(e, 'label') in unique_l:
                    e1_neighbs = vessel_analysis.find_neighbor_edges_asymmetrical(
                        g, e, 0)
                    e2_neighbs = vessel_analysis.find_neighbor_edges_asymmetrical(
Example #6
    if options.labels:
        ls = options.labels.split(',')
        ls = [int(l) for l in ls]
    else:
        ls = labelNumeric2Name.keys()

    if options.combine_labels:
        lpairs = options.combine_labels.split(',')
        labelpairs = {}
        for pair in lpairs:
            plabels = pair.split(':')
            plabels = [int(l) for l in plabels]
            labelpairs[plabels[0]] = plabels[1]

    ref = graph_analysis.input_graph(groundturht_file)
    g = graph_analysis.input_graph(test_file)
    g_gd = graph_analysis.input_graph(test_file_gd)
    g_s = graph_analysis.input_graph(test_file_s)

    for e in g.edge_list():
        if int(g.edge_property(e, 'cyl_label')) in labelpairs.keys():
            g.set_edge_property(
                e, 'cyl_label', labelpairs[int(g.edge_property(e,
                                                               'cyl_label'))])

    for e in g_gd.edge_list():
        if int(g_gd.edge_property(e, 'cyl_label')) in labelpairs.keys():
            g_gd.set_edge_property(
                e, 'cyl_label',
                labelpairs[int(g_gd.edge_property(e, 'cyl_label'))])
    else:
        input_files = args[0:-1]
        output_file = args[-1]

    if not options.clobber and os.path.exists(output_file):
        raise SystemExit, \
            "The --clobber option is needed to overwrite an existing file."

    label_vals = {}
    weight_vals = {}
    weighted_label_vals = {}

    for input_file in input_files:
        # reads in the graph to be labeled
        try:
            g, attributes = graph_analysis.input_graph(
                input_file, ["history", "vertex_offsets"])
            print("Succefully read in the %s\n" % input_file)
        except:
            print("Error reading in the %s\n" % input_file)

        for e in g.edge_list():
            l = g.edge_property(e, 'label')
            if l not in weight_vals.keys():
                weight_vals[l] = 0
            if l not in weighted_label_vals.keys():
                weighted_label_vals[l] = 0
            if options.labelprop == 'estimated_label':
                # weight by diameter^2 * length, proportional to the cylinder
                # volume (pi/4 * d^2 * length); the constant factor does not
                # matter for the weighting
                weight_vals[l] += g.edge_property(e, 'diameter') ** 2 * \
                    g.edge_property(e, 'length')
Example #8
    print(cmd)
    sys.stdout.flush()
    os.system(cmd)

    cmd = ("modify_db_intermediaries.py %s %s %s" %
           (input_file[:-3] + "graph2graph.db", ref_graph,
            output_file[:-3] + "_nofeature.db"))
    print(cmd)
    sys.stdout.flush()
    os.system(cmd)

    #output_file = output_file[:-3]+"_intm.db"

    try:
        g, attributes = graph_analysis.input_graph(
            output_file[:-3] + "_nofeature.db",
            ["history", "vertex_offsets"
             ])  # open graph data and copy contents to memory
        #g = graph_analysis.input_graph("tree_mask_blur_delleaves.db")
        print("Succefully read in the %s\n" % output_file[:-3] +
              "_nofeature.db")
        sys.stdout.flush()
    except:
        print("Error reading in the %s\n" % output_file[:-3] + "_nofeature.db")
        sys.stdout.flush()

    #redo the simplified graph and feature extraction for new edges!
    print len(g.edge_list()), " ", len(g.vertices), "\nsimplifying edges:"
    sys.stdout.flush()
    #for e in g.edge_list():
    #print e, " ",
    #print " "
Example #9
def graph2featureVect(training_files,featureNames):
	graphs=[]
	for training_file in training_files:
		try:
			graphs.append( graph_analysis.input_graph(training_file))  	# open graph data and copy contents to memory
			print ("Succefully read in %s\n" %training_file)	
		except:
			print("Error reading in %s\n" %training_file)
			
	#### initialize feature_vector with shape (#datapoints, #features), where #datapoints = total number of edges with label > -1 across all graphs
	#### initialize target_vector with shape (#datapoints, 1) holding the corresponding edge labels
	MRI_labels=[]
	if 'mri_label_dist' in featureNames:
		MRI_labels=[l[0] for l in graphs[0].edge_property(graphs[0].edge_list()[0],'mri_label_dist')]
		feature_vector = Matlib.empty((0,len(featureNames)+len(MRI_labels)-1),float)
	else:
		feature_vector = Matlib.empty((0,len(featureNames)),float)
	target_vector = []	#Matlib.empty((0,1),float)
	labelNumerics = []
	edge_w_indx = {}
	
	g_cnt=-1
	e_cnt = 0
	for g in graphs:
		g_cnt =g_cnt+1
		for e in g.edge_list():
			if 'label' not in g.edge_properties(e).keys():
				print ("ERROR! The edge (%d,%d) in graph %s doesn't have label property.\nAborted!\n" %(e[0],e[1],training_files[g_cnt]))
				exit (0)
			#else:
			if g.edge_property(e,'label')>-1:	#including the edges with label 0
				#target_vector= numpy.vstack([target_vector, array(g.edge_property(e,'label'))])
				target_vector.append(int(g.edge_property(e,'label')))
				if g.edge_property(e,'label') not in labelNumerics and g.edge_property(e,'label')>0:
					labelNumerics.append(int(g.edge_property(e,'label')))
				features=[]
				for f in featureNames:
					if f not in g.edge_properties(e).keys():
						print ("ERROR! The edge (%d,%d) in graph %s doesn't have feature %s property.\nAborted!\n" %(e[0],e[1],training_files[g_cnt],f))
						exit (0)
					if not f== 'mri_label_dist' and not f=='rel_dir' and not f=='rel_diameter':	
						features.append(g.edge_property(e,f))
					elif f== 'mri_label_dist':
						for mr_l in MRI_labels:
							#if type(g.edge_property(e,'mri_label_dist'))==list:
								#print mr_l,":",e, " list ", training_files[g_cnt]
								#ind=[l[0] for l in g.edge_property(e,'mri_label_dist')].index(mr_l)
							#else:
								#print mr_l,":",e, ", ", training_files[g_cnt]
								#ind=[l[0] for l in g.edge_property(e,'mri_label_dist').tolist()].index(mr_l)
							mri_label_dist_list = [int(l[0]) for l in g.edge_property(e,'mri_label_dist')]
							if mr_l in mri_label_dist_list:
								ind = mri_label_dist_list.index(mr_l)
								features.append(g.edge_property(e,'mri_label_dist')[ind][1])
					elif f=='rel_dir' or f=='rel_diameter':
						rel_f = 1.0
						for rel_i in range(min(len(g.edge_property(e,f)),4)):	# only consider up to 4 adjacent edges; if there are fewer, the remaining factors default to 1.0
							rel_f = rel_f * g.edge_property(e,f)[rel_i]
						features.append(rel_f)	
							
				feature_vector	= numpy.vstack([feature_vector, array(features)])		#hstack #a = matrix([[10,20,30]]); a=append(a,[[1,2,3]],axis=0); a=append(a,[[15],[15]],axis=1)
			edge_w_indx[e_cnt]= e 
			e_cnt = e_cnt+1
			
	
	#### find neighbouring edges and accumulate label co-occurrence counts into the adjacency matrix
	
	adjacency_matrix = (1.0/len(labelNumerics))*Matlib.ones((len(labelNumerics),len(labelNumerics)+1),float)		#### smoothing: start every entry at 1/len(labelNumerics) so no neighbourhood occurrence count is zero
	edge_neighboring_indx = {}
	
	g_cnt=-1
	e_cnt = 0
	for g in graphs:
		g_cnt =g_cnt+1
		for e in g.edge_list():
			#if 'label' not in g.edge_properties(e).keys():
				#print ("ERROR! The edge (%d,%d) in graph %s doesn't have label property.\nAborted!\n" %(e[0],e[1],training_files[g_cnt]))
                                #exit (0)
                        #else:
			if g.edge_property(e,'label')>0:
				indx0= labelNumerics.index(g.edge_property(e,'label'))
					
				e1_neighbours= g.vertices[e[0]].edges
				#e1_neighbours.remove(e2)
				for v in e1_neighbours:
					if v!=e[1]:
						neighbor_edge=tuple((min(e[0],v),max(e[0],v)))
						if g.edge_property(neighbor_edge,'label')>0:
							indx1= labelNumerics.index(g.edge_property(neighbor_edge,'label'))
							adjacency_matrix[indx0,indx1]= adjacency_matrix[indx0,indx1]+1
				e2_neighbours= g.vertices[e[1]].edges
				#e2_neighbours.remove(e1)
				for v in e2_neighbours:
					if v!=e[0]:
						neighbor_edge=tuple((min(e[1],v),max(e[1],v)))
						if g.edge_property(neighbor_edge,'label')>0:
							indx1= labelNumerics.index(g.edge_property(neighbor_edge,'label'))
							adjacency_matrix[indx0,indx1]= adjacency_matrix[indx0,indx1]+1
					
				if len(g.vertices[e[0]].edges)==1:		#end point
					adjacency_matrix[indx0,len(labelNumerics)] +=1
				if len(g.vertices[e[1]].edges)==1:		#end point
					adjacency_matrix[indx0,len(labelNumerics)] +=1					
			#find edge neighbouring indices
			neighbor_indx=[]
			e1_neighbours= g.vertices[e[0]].edges
			for v in e1_neighbours:
				if v!=e[1]:
					neighbor_edge=tuple((min(e[0],v),max(e[0],v)))
					neighbor_indx.append(find_key(edge_w_indx, neighbor_edge, e_cnt))
			e2_neighbours= g.vertices[e[1]].edges
			for v in e2_neighbours:
				if v!=e[0]:
					neighbor_edge=tuple((min(e[1],v),max(e[1],v)))
					neighbor_indx.append(find_key(edge_w_indx, neighbor_edge, e_cnt))
					
			#if (len(neighbor_indx)!=2 and len(neighbor_indx)!=4):
				#print ("\n\nERROR! edge (%d,%d) has %d neighbouring vertices!" %(e[0],e[1],len(neighbor_indx)))
				#if (len(neighbor_indx)!=0):
					#print "Aborted!\n"
					#exit(0)
			edge_neighboring_indx[e_cnt]=neighbor_indx
			e_cnt = e_cnt+1
			
	#### adjacency matrix normalization 
	for i in range(adjacency_matrix.shape[0]):
		adjacency_matrix[i,:] = adjacency_matrix[i,:]/adjacency_matrix[i,:].sum()	# use the numpy .sum() so each row normalizes to 1 even when adjacency_matrix is a numpy matrix
		
	
	print_adj_mat (adjacency_matrix,labelNumerics)
	
	return labelNumerics, feature_vector, target_vector, adjacency_matrix, edge_w_indx, edge_neighboring_indx 	#featureVect
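# A minimal usage sketch for graph2featureVect (the file and feature names below
# are hypothetical examples, not files shipped with this code):
#
#   training_files = ["c57_1_groundtruthlabel_cyl.db", "c57_2_groundtruthlabel_cyl.db"]
#   featureNames = ["diameter", "length", "rel_diameter", "mri_label_dist"]
#   (labelNumerics, feature_vector, target_vector, adjacency_matrix,
#    edge_w_indx, edge_neighboring_indx) = graph2featureVect(training_files, featureNames)
#   # feature_vector holds one row per edge with label > -1, target_vector the
#   # matching labels, and adjacency_matrix the row-normalized label
#   # co-occurrence counts between neighbouring edges.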
    sys.stdout.flush()

    tic = time.time()

    #groundturht_file, test_file, ref_graph, mr_atlas_file, mr_centroids_file, reference_file, output_file = args
    groundturht_file, test_file, output_file = args

    #cmnd = ("python /projects/souris/sghanavati/src/scripts/cerebrovascular_analysis/cylinder2graph.py %s %s %s %s %s %s --use_cyl_label --clobber" %(groundturht_file,ref_graph, mr_atlas_file, mr_centroids_file, reference_file,groundturht_file[:-2]+"db"))
    #print "\n", cmnd
    #os.system(cmnd)

    #cmnd = ("python /projects/souris/sghanavati/src/scripts/cerebrovascular_analysis/cylinder2graph.py %s %s %s %s %s %s --use_cyl_label --clobber" %(test_file,ref_graph, mr_atlas_file, mr_centroids_file, reference_file,test_file[:-2]+"db"))
    #print "\n", cmnd
    #os.system(cmnd)

    ref = graph_analysis.input_graph(groundturht_file[:-2] + "db")
    g = graph_analysis.input_graph(test_file[:-2] + "db")
    h = copy.deepcopy(g)

    labeled_num = 0
    error_num = 0
    total_volume = 0
    error_volume = 0
    availabel_labels = []

    for e in g.edge_list():
        if e in ref.edge_list():
            if int(ref.edge_property(e, 'cyl_label')) not in availabel_labels:
                availabel_labels.append(int(ref.edge_property(e, 'cyl_label')))
            labeled_num += 1
            e_radius = ref.edge_property(e, 'cyl_radius')
    if len(args) != 5:
        parser.error("incorrect number of arguments")
        
    
    graph_file, mr_atlas_file, mr_centroids_file, dir_reference_file, output_file = args


    #### mri_distance feature:
    cmd=("feature_MRI_labels.py %s %s %s --clobber" %(graph_file,mr_atlas_file, output_file))	
    print(cmd)
    sys.stdout.flush()
    check_call(cmd, shell=True)	
    

    try:
        g, attributes = graph_analysis.input_graph(output_file, ["history", "vertex_offsets"])  # open graph data and copy contents to memory
        print("Successfully read in the %s\n" % output_file)
        sys.stdout.flush()
    except:
        print("Error reading in the %s\n" % output_file)
        sys.stdout.flush()

    mr = shelve.open(mr_centroids_file, "r")
    mr_centroids = mr['mr_centroids']
    mr.close()
    
    #### find the curvature, midpoint, and tortuosity of each edge
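    # A minimal sketch of one way to get the midpoint and tortuosity of an edge
    # from its ordered centreline points (how the points are pulled out of the
    # graph is not shown here; `points` is assumed to be a list of 3D
    # coordinates running from one endpoint of the edge to the other):
    import numpy
    def edge_midpoint_and_tortuosity(points):
        pts = numpy.asarray(points, dtype=float)
        steps = numpy.sqrt(((pts[1:] - pts[:-1]) ** 2).sum(axis=1))  # per-segment lengths
        arc_length = steps.sum()
        chord = numpy.sqrt(((pts[-1] - pts[0]) ** 2).sum())          # end-to-end distance
        # midpoint: the first centreline point at or past half of the arc length
        i = numpy.searchsorted(numpy.cumsum(steps), arc_length / 2.0)
        midpoint = pts[min(i + 1, len(pts) - 1)]
        # tortuosity: arc length over straight-line (chord) distance (1.0 for degenerate edges)
        tortuosity = arc_length / chord if chord > 0 else 1.0
        return midpoint, tortuosity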
Example #12
            i1=l[i0+1:].index(';')+i0+1
            labelNumeric2Name[int(l[0:i0])]=l[i0+1:i1]
        f.close()   
        
    if options.labels:
        ls = options.labels.split(',')
        ls = [int(l) for l in ls]
    else:
        ls = []
        ls_name = []
        for l in labelNumeric2Name.keys():
            if labelNumeric2Name[l] not in ls_name:
                ls.append(l)
                ls_name.append(labelNumeric2Name[l])
    #ls = labelNumeric2Name.keys()
        
    f = open(options.output_name,'w')
    f.write( "strain,sample, label, label_Name, initial_RR,GD_RR,SA_RR, initialvol_RR,GDvol_RR,SAvol_RR") 
    ls = [int(l) for l in ls]
    for input_file in input_files:
        if os.path.exists(input_file) and os.path.exists(input_file[:-3]+"_autolabel_initial_cyl.db") and os.path.exists(input_file[:-3]+"_autolabel_GD_cyl.db") and os.path.exists(input_file[:-3]+"_autolabel_S_cyl.db"):
            g = graph_analysis.input_graph(input_file)
            ginit = graph_analysis.input_graph(input_file[:-3]+"_autolabel_initial_cyl.db")
            ggd = graph_analysis.input_graph(input_file[:-3]+"_autolabel_GD_cyl.db")
            gsa = graph_analysis.input_graph(input_file[:-3]+"_autolabel_S_cyl.db")
            f.write( "\n"+os.path.basename(input_file)[:3]+ ","+os.path.basename(input_file)[:14]+ ",0,total,"+ str(totallabelcomparison(g,ginit,labelNumeric2Name,'cyl_label')[0])+","+str(totallabelcomparison(g,ggd,labelNumeric2Name,'cyl_label')[0]) +","+ str(totallabelcomparison(g,gsa,labelNumeric2Name,'cyl_label')[0]) +"," + str(totallabelcomparison(g,ginit,labelNumeric2Name,'cyl_label')[1])+","+str(totallabelcomparison(g,ggd,labelNumeric2Name,'cyl_label')[1]) +","+ str(totallabelcomparison(g,gsa,labelNumeric2Name,'cyl_label')[1])) 
            for l in ls:
                f.write( "\n"+os.path.basename(input_file)[:3]+ ","+os.path.basename(input_file)[:14]+ ","+ str(l)+ ","+ labelNumeric2Name[l]+ ","+ str(labelcomparison(g,ginit,l,labelNumeric2Name,'cyl_label')[0])+","+str(labelcomparison(g,ggd,l,labelNumeric2Name,'cyl_label')[0]) +","+ str(labelcomparison(g,gsa,l,labelNumeric2Name,'cyl_label')[0]) + ","+ str(labelcomparison(g,ginit,l,labelNumeric2Name,'cyl_label')[1])+","+str(labelcomparison(g,ggd,l,labelNumeric2Name,'cyl_label')[1]) +","+ str(labelcomparison(g,gsa,l,labelNumeric2Name,'cyl_label')[1]) ) 
    f.close()