Example #1
def node_cluster_stat(index, parameter_list):
	"""
	08-14-05
		run cluster_stat, then gene_stat, on the index-th (offset, limit)
		slice of the source table and return this node's output filename
	"""
	index = int(index)	#08-14-05	convert it to integer form, because it's transferred as a string
	import sys	#sys.stderr is used for the debug message below
	from cluster_stat import cluster_stat
	from gene_stat import gene_stat	#needed for the second pass below
	hostname, dbname, schema, source_table, output, gene_table, \
		gene_table_commit, OffsetLimitList, debug = parameter_list
	
	#fixed parameters for the cluster_stat pass
	target_table = 'cluster_stat'
	offset, limit = OffsetLimitList[index]
	output = '%s.%s'%(output, index)
	uniformity = 0
	min_node_size = 0
	min_node_depth = 5
	bonferroni = 0
	commit = 0
	report = 0
	log = 0
	wu = 1
	if debug:
		sys.stderr.write("Index: %s; Offset: %s; Limit: %s\n"%(index, offset, limit))
	instance = cluster_stat(hostname, dbname, schema, source_table, target_table, \
		offset, limit, output, bonferroni, report, log, wu, commit, uniformity, \
		min_node_size, min_node_depth)
	instance.run()
	del instance
	
	#parameters for the follow-up gene_stat pass over this node's output
	depth_cut_off = 5
	dir_files = output
	leave_one_out = 1
	wu = 1
	report = 0
	commit = gene_table_commit
	subgraph_cut_off = 0
	debug = 0
	new_table = 0
	recurrence_gap_size = 2
	connectivity_gap_size = 2
	instance = gene_stat(hostname, dbname, schema, target_table, source_table, \
		leave_one_out, wu, report, depth_cut_off, dir_files, commit, gene_table, \
		subgraph_cut_off, debug, new_table, recurrence_gap_size, connectivity_gap_size)
	instance.run()
	del instance
	return output
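
The (offset, limit) pairs indexed above are produced by createOffsetLimitList() in Example #2. As a rough sketch of the idea (make_offset_limit_list is a hypothetical stand-in, not the original implementation), an even split of the source table's rows over the fake nodes could look like this:

def make_offset_limit_list(no_of_rows, no_of_nodes):
	"""Split no_of_rows rows into no_of_nodes contiguous (offset, limit) slices."""
	chunk, remainder = divmod(no_of_rows, no_of_nodes)
	offset_limit_list = []
	offset = 0
	for i in range(no_of_nodes):
		limit = chunk + (1 if i < remainder else 0)	#spread the remainder over the first slices
		offset_limit_list.append((offset, limit))
		offset += limit
	return offset_limit_list

#e.g. make_offset_limit_list(10, 3) == [(0, 4), (4, 3), (7, 3)]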
Example #2
	def run(self):
		"""
		08-14-05
		"""
		communicator = MPI.world.duplicate()
		fake_no_of_nodes = int((communicator.size-1)*self.times_nodes)	#NOTICE: fake_no_of_nodes is used to enlarge (or shrink) the actual
			#number of nodes, to balance the amount of work on each node
		OffsetLimitList = Numeric.zeros((fake_no_of_nodes,2), Numeric.Int)
		if communicator.rank == 0:
			(conn, curs) = db_connect(self.hostname, self.dbname, self.schema)
			OffsetLimitList = self.createOffsetLimitList(curs, self.source_table, fake_no_of_nodes)
			OffsetLimitList = Numeric.array(OffsetLimitList, Numeric.Int)	#transform it into Numeric array to broadcast()
			if self.commit:	#08-14-05	create the gene_table
				instance = gene_stat()
				instance.createGeneTable(curs, self.gene_table)
				curs.execute('end')
			if self.debug:
				sys.stderr.write("OffsetLimitList: %s"%repr(OffsetLimitList))
			del conn, curs
		
		communicator.broadcast(OffsetLimitList, 0)	#share the OffsetLimitList
		
		mpi_synchronize(communicator)
		job_list = range(len(OffsetLimitList))	#corresponding to the indices in the OffsetLimitList
		parameter_list = [self.hostname, self.dbname, self.schema, self.source_table, self.output, \
			self.gene_table, self.commit, OffsetLimitList, self.debug]
		if self.debug:
			sys.stderr.write("The common parameter_list is %s.\n"%repr(parameter_list))
		of_name_list = mpi_schedule_jobs(communicator, job_list, node_cluster_stat, parameter_list, self.debug)
		
		mpi_synchronize(communicator)
		
		#08-14-05	collecting is not strictly necessary, but it keeps the number of output files small
		if communicator.rank == 0:
			netmine_wrapper_instance = netmine_wrapper()
			netmine_wrapper_instance.collect_and_merge_output(of_name_list, self.output)
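
mpi_schedule_jobs() above dispatches job indices to the worker nodes and gathers the returned output filenames for the final merge. A minimal sketch of that master/worker loop, written against mpi4py rather than the old Scientific.MPI-style bindings used here (schedule_jobs, its tags, and its stop signal are illustrative, not the original API):

from mpi4py import MPI

def schedule_jobs(comm, job_list, node_function, parameter_list):
	"""Rank 0 hands out jobs one at a time; workers return one result per job."""
	rank, size = comm.Get_rank(), comm.Get_size()
	results = []
	if rank == 0:
		next_job, active = 0, 0
		for worker in range(1, size):	#seed every worker with one job (or a stop signal)
			if next_job < len(job_list):
				comm.send(job_list[next_job], dest=worker, tag=0)
				next_job += 1
				active += 1
			else:
				comm.send(None, dest=worker, tag=0)
		while active:	#hand out the remaining jobs as workers finish
			status = MPI.Status()
			results.append(comm.recv(source=MPI.ANY_SOURCE, tag=1, status=status))
			active -= 1
			if next_job < len(job_list):
				comm.send(job_list[next_job], dest=status.Get_source(), tag=0)
				next_job += 1
				active += 1
			else:
				comm.send(None, dest=status.Get_source(), tag=0)	#no more work: stop signal
	else:
		while True:
			job = comm.recv(source=0, tag=0)
			if job is None:	#stop signal from the master
				break
			comm.send(node_function(job, parameter_list), dest=0, tag=1)
	return results	#only meaningful on rank 0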
Example #3
	def run(self):
		"""
		09-05-05
		10-23-05
			create views from old schema
			result goes to the new schema's p_gene_table
		
			(input_node)
				--db_connect()
				--form_schema_tables()
				--form_schema_tables()
				--get_gene_no2go_no_set()
				--get_go_no2depth()
				(pass data to computing_node)
			(computing_node)
				(take data from other nodes, 0 and size-1)
			(judge_node)
				--gene_stat()
				--db_connect()
				--gene_p_map_redundancy()
			(output_node)
				--db_connect()
				--form_schema_tables()
				--form_schema_tables()
				--MpiPredictionFilter()
				--MpiPredictionFilter_instance.createGeneTable()
				--get_go_no2edge_counter_list()(if necessary)
				(pass go_no2edge_counter_list to computing_node)
			
			(input_node)
				--fetch_cluster_block()
			(computing_node)
				--get_no_of_unknown_genes()
				--node_fire_handler()
				--cleanup_handler()
			(judge_node)
				--gene_stat_instance.(match functions)
			(output_node)
				--output_node_handler()
					--MpiPredictionFilter_instance.submit_to_p_gene_table()
		"""
		communicator = MPI.world.duplicate()
		node_rank = communicator.rank
		if node_rank == 0:
			(conn, curs) = db_connect(self.hostname, self.dbname, self.schema)
			"""
			#01-02-06
			old_schema_instance = form_schema_tables(self.input_fname)
			new_schema_instance = form_schema_tables(self.jnput_fname)
			"""
			gene_no2go_no = get_gene_no2go_no_set(curs)
			gene_no2go_no_pickle = cPickle.dumps(gene_no2go_no, -1)	#-1 means use the highest protocol
			go_no2depth = get_go_no2depth(curs)
			go_no2depth_pickle = cPickle.dumps(go_no2depth, -1)
			go_no2gene_no_set = get_go_no2gene_no_set(curs)
			go_no2gene_no_set_pickle = cPickle.dumps(go_no2gene_no_set, -1)
			for node in range(1, communicator.size-2):	#send it to the computing_node
				communicator.send(gene_no2go_no_pickle, node, 0)
				communicator.send(go_no2depth_pickle, node, 0)
				communicator.send(go_no2gene_no_set_pickle, node, 0)
		elif node_rank <= communicator.size-3:	#WATCH: the last two nodes (judge and output) are not here.
			data, source, tag = communicator.receiveString(0, 0)
			gene_no2go_no = cPickle.loads(data)	#take the data
			data, source, tag = communicator.receiveString(0, 0)
			go_no2depth = cPickle.loads(data)
			data, source, tag = communicator.receiveString(0, 0)
			go_no2gene_no_set = cPickle.loads(data)
			data, source, tag = communicator.receiveString(communicator.size-1, 0)	#from the last node
			go_no2edge_counter_list = cPickle.loads(data)
			#choose a functor for recurrence_array
			functor_dict = {0: None,
				1: lambda x: int(x>=self.recurrence_x),
				2: lambda x: math.pow(x, self.recurrence_x)}
			functor = functor_dict[self.recurrence_x_type]
		elif node_rank == communicator.size-2:	#judge node
			gene_stat_instance = gene_stat(depth_cut_off=self.depth)
			(conn, curs) = db_connect(self.hostname, self.dbname, self.schema)
			gene_stat_instance.dstruc_loadin(curs)
			from gene_p_map_redundancy import gene_p_map_redundancy
			node_distance_class = gene_p_map_redundancy()
		elif node_rank == communicator.size-1:	#output node: establish the DB connection before proceeding
			(conn, curs) = db_connect(self.hostname, self.dbname, self.schema)
			"""
			#01-02-06, input and output are all directed to files
			old_schema_instance = form_schema_tables(self.input_fname)
			new_schema_instance = form_schema_tables(self.jnput_fname)
			MpiPredictionFilter_instance = MpiPredictionFilter()
			MpiPredictionFilter_instance.view_from_table(curs, old_schema_instance.splat_table, new_schema_instance.splat_table)
			MpiPredictionFilter_instance.view_from_table(curs, old_schema_instance.mcl_table, new_schema_instance.mcl_table)
			MpiPredictionFilter_instance.view_from_table(curs, old_schema_instance.pattern_table, new_schema_instance.pattern_table)
			if self.new_table:
				MpiPredictionFilter_instance.createGeneTable(curs, new_schema_instance.p_gene_table)
			"""
			if self.go_no2edge_counter_list_fname:
				go_no2edge_counter_list = cPickle.load(open(self.go_no2edge_counter_list_fname, 'rb'))	#open the pickle in binary mode
			else:
				if self.eg_d_type==2:
					go_no2edge_counter_list = None
				else:
					gene_no2go_no = get_gene_no2go_no_set(curs)
					go_no2edge_counter_list = get_go_no2edge_counter_list(curs, gene_no2go_no, self.edge_type2index)
			go_no2edge_counter_list_pickle = cPickle.dumps(go_no2edge_counter_list, -1)
			for node in range(1, communicator.size-2):	#send it to the computing_node
				communicator.send(go_no2edge_counter_list_pickle, node, 0)
		
		mpi_synchronize(communicator)
		
		free_computing_nodes = range(1, communicator.size-2)	#exclude the last two nodes (judge and output)
		if node_rank == 0:
			"""
			curs.execute("DECLARE crs CURSOR FOR SELECT id, vertex_set, edge_set, no_of_edges,\
			connectivity, unknown_gene_ratio, recurrence_array, d_matrix from %s"%(old_schema_instance.pattern_table))
			"""
			self.counter = 0	#01-02-06 counter is used as id
			reader = csv.reader(open(self.input_fname, 'r'), delimiter='\t')
			parameter_list = [reader]
			input_node(communicator, parameter_list, free_computing_nodes, self.message_size, \
				self.report, input_handler=self.input_handler)
			del reader
		elif node_rank in free_computing_nodes:
			no_of_unknown_genes = get_no_of_unknown_genes(gene_no2go_no)
			GradientScorePrediction_instance = GradientScorePrediction(gene_no2go_no, go_no2gene_no_set, go_no2depth, \
				go_no2edge_counter_list, no_of_unknown_genes, self.depth, self.min_layer1_associated_genes, \
				self.min_layer1_ratio, self.min_layer2_associated_genes, self.min_layer2_ratio, self.exponent, \
				self.score_list, self.max_layer, self.norm_exp, self.eg_d_type, self.debug)
			parameter_list = [GradientScorePrediction_instance, functor]
			computing_node(communicator, parameter_list, self.node_fire_handler, self.cleanup_handler, self.report)
		elif node_rank == communicator.size-2:
			self.judge_node(communicator, curs, gene_stat_instance, node_distance_class)
		elif node_rank == communicator.size-1:
			#01-02-06 output goes to plain file, not database
			writer = csv.writer(open(self.jnput_fname, 'w'), delimiter='\t')
			parameter_list = [writer]
			output_node(communicator, free_computing_nodes, parameter_list, self.output_node_handler, self.report)
			del writer
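
The branching above fixes a static rank-to-role layout: rank 0 feeds input, the last two ranks judge and write output, and everything in between computes. A hypothetical helper (not part of the original code) makes that topology explicit:

def node_role(rank, size):
	"""Map an MPI rank to its role in the 4-stage pipeline of Example #3."""
	if rank == 0:
		return 'input_node'	#reads patterns from input_fname and feeds the workers
	elif rank <= size - 3:
		return 'computing_node'	#ranks 1..size-3 run GradientScorePrediction
	elif rank == size - 2:
		return 'judge_node'	#post-processes predictions via gene_stat
	else:
		return 'output_node'	#rank size-1 writes the results to jnput_fname

#e.g. with 6 processes: rank 0 is input, ranks 1-3 compute, rank 4 judges, rank 5 writes.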