# Imports used throughout these snippets (assumed; this listing collects
# several PyBrain examples)
import os
from scipy import diag
from numpy.random import multivariate_normal
from pybrain.structure import FeedForwardNetwork, RecurrentNetwork
from pybrain.structure import LinearLayer, SigmoidLayer, BiasUnit, FullConnection
from pybrain.datasets import ClassificationDataSet, SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.tools.customxml.networkwriter import NetworkWriter

# Inspecting the quickstart network built earlier (in_to_hidden,
# hidden_to_out and n come from the preceding snippet)
print in_to_hidden.params   # weights of the in->hidden connection
print hidden_to_out.params  # weights of the hidden->out connection
print n.params              # all network weights in one flat array
print n.activate([1, 2])    # forward pass for the input (1, 2)
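
# The flat vectors above are weight matrices unrolled row by row; a minimal
# sketch of recovering the matrix view (assuming the 2-3-1 quickstart layout,
# so in_to_hidden is 3x2: one row of weights per hidden neuron):
print in_to_hidden.params.reshape((in_to_hidden.outdim, in_to_hidden.indim))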

# Naming your NN
print LinearLayer(2).name              # auto-generated name, e.g. 'LinearLayer-5'
print LinearLayer(2, name='foo').name  # explicit name: 'foo'
print LinearLayer(2).name              # another auto-generated name

# Using Recurrent NN
n = RecurrentNetwork()

n.addInputModule(LinearLayer(2, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

# Looks back in time one timestep
n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))

# With an RNN, repeated activations on the same input give different outputs:
# the recurrent connection feeds the hidden state back in at every step
n.sortModules()
print n.activate([2, 2])
print n.activate([2, 2])
print n.activate([2, 2])
n.reset()  # clear the network's history; activation starts from scratch again
print n.activate([2, 2])
print n.activate([2, 2])
print n

# The same network rebuilt WITHOUT the recurrent connection: with no feedback
# edge, repeated activations of the same input return identical outputs.
n = RecurrentNetwork()
n.addInputModule(LinearLayer(2, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))

n.sortModules()
print n.activate((2, 2))
print n.activate((2, 2))
print n.activate((2, 2))
n.reset()
print n.activate((2, 2))
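
# The hand-built 2-3-1 layout above can also be produced with PyBrain's
# buildNetwork shortcut; a convenience sketch (by default it also adds a bias):
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import SigmoidLayer
net = buildNetwork(2, 3, 1, hiddenclass=SigmoidLayer)
print net.activate((2, 2))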

#######################################
#########   Classification with feed forward networks

# ######################  graphical output ########################

# To get a nice dataset for visualization, we produce a set of 2D points
# belonging to three different classes, drawn from three Gaussians.
means = [(-1, 0), (2, 4), (3, 1)]
cov = [diag([1, 1]), diag([0.5, 1.2]), diag([1.5, 0.7])]
alldata = ClassificationDataSet(2, 1, nb_classes=3)
for n in xrange(400):
    for klass in range(3):
        input = multivariate_normal(means[klass], cov[klass])
        alldata.addSample(input, [klass])
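
# The classification tutorial then typically splits the data, converts class
# labels to one-of-many encoding, and trains a softmax MLP; a minimal sketch
# following the standard PyBrain recipe (the hidden size 5 is arbitrary):
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import SoftmaxLayer
from pybrain.utilities import percentError

tstdata, trndata = alldata.splitWithProportion(0.25)
trndata._convertToOneOfMany()
tstdata._convertToOneOfMany()
fnn = buildNetwork(trndata.indim, 5, trndata.outdim, outclass=SoftmaxLayer)
trainer = BackpropTrainer(fnn, dataset=trndata, momentum=0.1,
                          verbose=True, weightdecay=0.01)
trainer.trainEpochs(5)
print 'train error: %5.2f%%' % percentError(
    trainer.testOnClassData(), trndata['class'])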
#######################################
#########   Example 3: assembling an MLP by hand

n = FeedForwardNetwork()
inLayer = LinearLayer(2)       # 2-3-1 layer sizes assumed, as in the quickstart
hiddenLayer = SigmoidLayer(3)
outLayer = LinearLayer(1)

n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addOutputModule(outLayer)
in_to_hidden = FullConnection(inLayer, hiddenLayer)
hidden_to_out = FullConnection(hiddenLayer, outLayer)
n.addConnection(in_to_hidden)
n.addConnection(hidden_to_out)

# this is required to make the MLP usable 
n.sortModules()

print n.activate((2,2)) # forward pass
print 'n.params\n', n.params # all weights

# same but for recurrent network
n = RecurrentNetwork()
n.addInputModule(LinearLayer(2, name='in'))
n.addModule(SigmoidLayer(3, name='hidden'))
n.addOutputModule(LinearLayer(1, name='out'))
n.addConnection(FullConnection(n['in'], n['hidden'], name='c1'))
n.addConnection(FullConnection(n['hidden'], n['out'], name='c2'))
n.addRecurrentConnection(FullConnection(n['hidden'], n['hidden'], name='c3'))

n.sortModules()


print n.activate((2,2)) # forward pass
print n.activate((2,2)) # forward pass
print n.activate((2,2)) # forward pass
n.reset()
print 'after reset'
print n.activate((2,2)) # forward pass
#######################################
#########   Example 4: an MLP with explicit bias units

n = FeedForwardNetwork()
inLayer = LinearLayer(2)       # 2-3-1 layer sizes assumed, as above
hiddenLayer = SigmoidLayer(3)
outLayer = LinearLayer(1)
biasinUnit = BiasUnit(name='bias_hidden')
biasoutUnit = BiasUnit(name='bias_out')

n.addInputModule(inLayer)
n.addModule(hiddenLayer)
n.addModule(biasinUnit)
n.addModule(biasoutUnit)
n.addOutputModule(outLayer)
in_to_hidden = FullConnection(inLayer,hiddenLayer)
bias_to_hidden = FullConnection(biasinUnit,hiddenLayer)
bias_to_out = FullConnection(biasoutUnit,outLayer)
hidden_to_out = FullConnection(hiddenLayer,outLayer)
n.addConnection(in_to_hidden)
n.addConnection(bias_to_hidden)
n.addConnection(bias_to_out)
n.addConnection(hidden_to_out)

n.sortModules()
n.reset()

# read the initial weight values from myparam2.txt if it exists;
# otherwise write the current (random) weights out to that file
filetoopen = os.path.join(os.getcwd(), 'myparam2.txt')
if os.path.isfile(filetoopen):
  myfile = open(filetoopen, 'r')
  c = []
  for line in myfile:
    c.append(float(line))
  n._setParameters(c)
else:
  myfile = open(filetoopen, 'w')
  for i in n.params:
    myfile.write(str(i) + '\n')
myfile.close()
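
# PyBrain can also persist the whole network (structure plus weights) as XML,
# as done further below with NetworkWriter; a minimal round-trip sketch
# (the file name 'biased_net.xml' is an arbitrary choice):
from pybrain.tools.customxml.networkwriter import NetworkWriter
from pybrain.tools.customxml.networkreader import NetworkReader

NetworkWriter.writeToFile(n, 'biased_net.xml')
n2 = NetworkReader.readFrom('biased_net.xml')
print n2.activate((2, 2))
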
def ANN_edge_analysis(a_network, a_gene, a_dataset, boot_val):
	"""Create and train a network that reflects the structure of the hypothesized network."""

	regulatory_network = FeedForwardNetwork()

	# retrieving needed parameters from the input network

	data_node_list = get_sub_list_from_network(a_network, a_gene, "gene,TF", 1)

	# Need to add +1 node to the input layer that represents the "other" control variables

	# describing network modules to be used
	inLayer = LinearLayer(len(data_node_list)-1)
	#hiddenLayer = LinearLayer(len(data_node_list)-1)
	outLayer = LinearLayer(1)


	# Adding layers to network
	regulatory_network.addInputModule(inLayer)
	#regulatory_network.addModule(hiddenLayer)
	regulatory_network.addOutputModule(outLayer)

	# Adding connections between layers

	#in_to_hidden = LinearConnection(inLayer,hiddenLayer)
	#hidden_to_out = FullConnection(hiddenLayer, outLayer)

	in_to_out = FullConnection(inLayer, outLayer)

	#regulatory_network.addConnection(in_to_hidden)
	#regulatory_network.addConnection(hidden_to_out)

	regulatory_network.addConnection(in_to_out)

	get_nn_details(regulatory_network)

	# Other stuff added

	regulatory_network.sortModules()

	# Formatting the dataset 

	input_dimension = len(data_node_list) - 1
	print "input dimension = ", input_dimension

	DS = SupervisedDataSet(input_dimension, 1)

	# Adding data. Ordering would be a problem if the TFs differed between experiments, but it is consistent here.

	for experiment in a_dataset:
		tf_list = []
		gene_list = []
		tf_labels = []
		for TF in data_node_list:
			if TF != a_gene:
				#print TF, "<---"
				tf_list.append(experiment[TF])
				tf_labels.append(TF)
			else:
				#print TF, "<---gene"
				gene_list.append(experiment[TF])

		print tf_list
		print gene_list


		if check_missing_experiments(tf_list) and check_missing_experiments(gene_list):
			float_tf_list = [float(i) for i in tf_list]
			float_gene_list = [float(i) for i in gene_list]
			DS.appendLinked( float_tf_list, float_gene_list )

	print "......"

	print DS

	# Training
	trainer = BackpropTrainer(regulatory_network, momentum=0.1, verbose=True, weightdecay=0.01)

	trainer.setData(DS)

	result_list = []

	boot_count = 0
	while boot_count < boot_val:
		#trainer.trainEpochs(1000)
		trainer.trainUntilConvergence(validationProportion=0.25)
		print regulatory_network
		this = get_nn_details(regulatory_network)
		result_list.append(this)
		regulatory_network.reset()
		boot_count += 1

	print tf_labels
	print regulatory_network.params
	print in_to_out.params
	print inLayer
	pesos_conexiones(regulatory_network)

	NetworkWriter.writeToFile(regulatory_network, 'trained_net.xml')
	return result_list
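
# The helpers used above (get_sub_list_from_network, get_nn_details,
# check_missing_experiments, pesos_conexiones) are defined elsewhere in this
# project. Two plausible sketches of their assumed contracts, purely
# illustrative (names suffixed _sketch to avoid clashing with the real ones):

def check_missing_experiments_sketch(values):
	"""Assumed contract: True when every measurement in values is usable."""
	return all(v not in ('', 'NA', None) for v in values)

def get_nn_details_sketch(net):
	"""Assumed contract: map each connection to its weight vector."""
	details = {}
	for conn_list in net.connections.values():
		for conn in conn_list:
			details[str(conn)] = list(conn.params)
	return details
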
def ANN_blind_analysis(a_network, a_gene, a_dataset, boot_val):
	"""Create and train a network that reflects the structure of the hypothesized network."""

	regulatory_network = FeedForwardNetwork()

	# retrieving needed parameters from the input network

	upper_case_data_node_list = get_sub_list_from_network(a_network, a_gene, "gene,TF", 1)

	# to lower case for everything
	data_node_list = [x.lower() for x in upper_case_data_node_list]
	a_gene = a_gene.lower()

	# If the target gene is also a TF, remove it here; it is re-appended later as the target
	if a_gene in data_node_list: data_node_list.remove(a_gene)

	print 'what is in data_node_list:'
	print data_node_list

	if len(data_node_list) == 0:
		print "No connections to " + a_gene + " found."
		return [a_gene, '0', '0']


	# Check for missing entries in the dataset (DS)
	# For the main gene
	if a_gene not in a_dataset[0].keys():
		return [a_gene, '0', '0']
	
	# For the linked genes (iterate over a copy: removing items from a list
	# while looping over it would skip entries)
	for each_gene in list(data_node_list):
		if each_gene not in a_dataset[0].keys():
			data_node_list.remove(each_gene)

	if len(data_node_list) == 0:
		print "No connections to " + a_gene + " found."
		return [a_gene, '0', '0']

	print len(data_node_list)
	print data_node_list

	# Need to add +1 node to the input layer that represents the "other" control variables

	# describing network modules to be used
	inLayer = LinearLayer(len(data_node_list), name="Input_layer")
	
	hiddenLayer = SigmoidLayer(len(data_node_list) + 1, name="Hidden_sigmoid_layer_1")
	
	outLayer = LinearLayer(1, name="Output_layer")


	# Adding layers to network
	regulatory_network.addInputModule(inLayer)

	regulatory_network.addModule(hiddenLayer)

	regulatory_network.addOutputModule(outLayer)

	# Adding connections between layers

	in_to_hidden = FullConnection(inLayer, hiddenLayer)

	hidden_to_out = FullConnection(hiddenLayer, outLayer)


	regulatory_network.addConnection(in_to_hidden)

	regulatory_network.addConnection(hidden_to_out)


	get_nn_details(regulatory_network)

	# Other stuff added

	regulatory_network.sortModules()

	# Formatting the dataset 

	input_dimension = len(data_node_list)
	print "input dimension = ", input_dimension

	DS = SupervisedDataSet(input_dimension, 1)

	# Adding data. Ordering would be a problem if the TFs differed between experiments, but it is consistent here.


	# Re-append the target gene so the next loop sees it alongside the TFs
	data_node_list.append(a_gene)
	print 'node list contains: '
	print data_node_list

	# This is where the ordered dict needs to be used to link the input name to the input node.

	tf_labels = []
	first_round = True
	for experiment in a_dataset:
		tf_list = []
		gene_list = []
		for TF in data_node_list:
			if TF != a_gene:
				#print TF, "<---"
				tf_list.append(experiment[TF])
				if first_round:
					tf_labels.append(TF)
			else:
				#print TF, "<---gene"
				gene_list.append(experiment[TF])
		# labels are collected only once, on the first experiment
		first_round = False
		# View the input data sets
		print tf_labels
		print tf_list
		print gene_list


		if check_missing_experiments(tf_list) and check_missing_experiments(gene_list):
			float_tf_list = [float(i) for i in tf_list]
			float_gene_list = [float(i) for i in gene_list]
			DS.appendLinked( float_tf_list, float_gene_list )

	print "......"
	print 'Network before training'
	print regulatory_network

	pesos_conexiones(regulatory_network)
	print regulatory_network.outputerror

	#print DS

	# Training (RPropMinusTrainer_Evolved is presumably a project-local variant
	# of PyBrain's RPropMinusTrainer; it is not defined in this file)
	trainer = RPropMinusTrainer_Evolved(regulatory_network, verbose=False)

	trainer.setData(DS)

	result_list = []
	best_run_error = float('inf')  # sentinel: the first round always becomes best-so-far

	boot_count = 0
	while boot_count < boot_val:
		print '\n'
		print 'Bootstrap round ' + str(boot_count + 1)
		trainer.trainEpochs(500)
		this = get_nn_details(regulatory_network)
		# corrected error reported by the trainer
		current_run_error = trainer.total_error
		print 'Bootstrap round ' + str(boot_count + 1) + ' error: ' + str(current_run_error)

		if abs(current_run_error) < abs(best_run_error):
			best_run_error = current_run_error
			trained_net_filename = a_gene + '_trained_net.xml'
			NetworkWriter.writeToFile(regulatory_network, trained_net_filename)

			export_to_gml(regulatory_network, tf_labels, a_gene)

		#result_list.append(this)
		regulatory_network.reset()
		regulatory_network.randomize()
		trainer = RPropMinusTrainer_Evolved(regulatory_network, verbose=False)
		trainer.setData(DS)
		boot_count += 1

	#print "TF Labels"
	#print tf_labels
	#print regulatory_network.params
	#print inLayer
	#print "Pesos Conexiones"
	#pesos_conexiones(regulatory_network)


	result_list.append(a_gene)

	result_list.append(best_run_error)

	result_list.append(len(tf_list))

	return result_list
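
# A hedged usage sketch: a_dataset is a list of dicts, one per experiment,
# mapping lowercase gene/TF names to expression values; the hypothesis
# network and the names below ('hypothesis_net', 'gene_x', 'tf_a', 'tf_b')
# are illustrative assumptions, not part of this project.
#
# example_dataset = [
#     {'tf_a': '0.5', 'tf_b': '1.2', 'gene_x': '0.9'},
#     {'tf_a': '0.7', 'tf_b': 'NA',  'gene_x': '1.1'},
# ]
# results = ANN_blind_analysis(hypothesis_net, 'gene_x', example_dataset, 5)
# print results   # -> [gene_name, best_run_error, number_of_TF_inputs]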