# Single-task baseline: linear hidden layer (a ReLU variant is commented out
# below), softmax output, and cross-entropy plus the weight-decay terms
# collected in 'losses'.
def runNN (train_x, train_y, test_x, test_y, numHidden, numEpochs = NUM_EPOCHS):
	print("NN({})".format(numHidden))
	with tf.Graph().as_default():
		session = tf.InteractiveSession()

		x = tf.placeholder(tf.float32, shape=[None, train_x.shape[1]])
		y_ = tf.placeholder(tf.float32, shape=[None, train_y.shape[1]])

		W1 = makeVariable([train_x.shape[1],numHidden], stddev=0.5, wd=1e1, name="W1")
		b1 = makeVariable([numHidden], stddev=0.5, wd=1e1, name="b1")
		W2 = makeVariable([numHidden,train_y.shape[1]], stddev=0.5, wd=1e0, name="W2")

		#level1 = tf.nn.relu(tf.matmul(x,W1) + b1)
		level1 = tf.matmul(x,W1) + b1
		y = tf.nn.softmax(tf.matmul(level1,W2))

		cross_entropy = -tf.reduce_mean(y_*tf.log(tf.clip_by_value(y,1e-10,1.0)), name='cross_entropy')
		tf.add_to_collection('losses', cross_entropy)
		total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')

		train_step = tf.train.MomentumOptimizer(learning_rate=.001, momentum=0.01).minimize(total_loss)
		#train_step = tf.train.AdamOptimizer(learning_rate=.001).minimize(total_loss)

		session.run(tf.global_variables_initializer())
		for i in range(numEpochs):
			offset = i*BATCH_SIZE % (train_x.shape[0] - BATCH_SIZE)
			train_step.run({x: train_x[offset:offset+BATCH_SIZE, :], y_: train_y[offset:offset+BATCH_SIZE, :]})
			if i % 500 == 0:
				util.showProgress(cross_entropy, x, y, y_, test_x, test_y)
		session.close()
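
# --- Hypothetical helper (an assumption; the repo defines makeVariable
# elsewhere). Reconstructed from the call sites above: create a
# truncated-normal variable and register an L2 weight-decay penalty (scaled
# by wd) in each named loss collection, defaulting to the 'losses' collection
# that runNN sums via tf.add_n. Treat this as a sketch, not the actual
# definition.
def makeVariable(shape, stddev, wd, name, collectionNames=None):
	var = tf.Variable(tf.truncated_normal(shape, stddev=stddev), name=name)
	weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name=name + '_wd')
	for collection in (collectionNames or ['losses']):
		tf.add_to_collection(collection, weight_decay)
	return var
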
# Alternative variant with a ReLU hidden layer and per-class biases; renamed
# from runNN so it no longer shadows the definition above.
def runNN_relu (train_x, train_y, test_x, test_y, numHidden):
	print("NN({})".format(numHidden))
	session = tf.InteractiveSession()

	x = tf.placeholder(tf.float32, shape=[None, train_x.shape[1]])
	y_ = tf.placeholder(tf.float32, shape=[None, 2])

	W1 = tf.Variable(tf.truncated_normal([train_x.shape[1],numHidden], stddev=0.01))
	b1 = tf.Variable(tf.truncated_normal([numHidden], stddev=0.01))
	W2 = tf.Variable(tf.truncated_normal([numHidden,2], stddev=0.01))
	b2 = tf.Variable(tf.truncated_normal([2], stddev=0.01))

	z = tf.nn.relu(tf.matmul(x,W1) + b1)
	y = tf.nn.softmax(tf.matmul(z,W2) + b2)

	cross_entropy = -tf.reduce_sum(y_*tf.log(tf.clip_by_value(y,1e-10,1.0)))
	#cross_entropy = -tf.reduce_sum(y_*tf.log(y))
	train_step = tf.train.MomentumOptimizer(learning_rate=.001, momentum=0.1).minimize(cross_entropy)
	#train_step = tf.train.AdamOptimizer(learning_rate=.01).minimize(cross_entropy)

	session.run(tf.global_variables_initializer())
	for i in range(NUM_EPOCHS):
		offset = i*BATCH_SIZE % (train_x.shape[0] - BATCH_SIZE)
		train_step.run({x: train_x[offset:offset+BATCH_SIZE, :], y_: makeLabels(train_y[offset:offset+BATCH_SIZE])})
		if i % 100 == 0:
			util.showProgress(cross_entropy, x, y, y_, test_x, test_y)
	session.close()
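
# --- Hypothetical helper (an assumption; the repo defines makeLabels
# elsewhere). The variant above feeds a [None, 2] placeholder, so a plausible
# sketch one-hot encodes a 1-D vector of 0/1 labels.
import numpy as np  # assumed to be imported at module top

def makeLabels(y):
	y = np.asarray(y).astype(int)
	labels = np.zeros((len(y), 2), dtype='float32')
	labels[np.arange(len(y)), y] = 1.0
	return labels
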
# Multi-task ("LLL") variant: one hidden layer shared across all courses,
# with a separate softmax output layer and loss collection per course.
def runLLL_NN (all_train_x, all_train_y, all_test_x, all_test_y, numHidden, courseIds):
	n = len(courseIds)
	with tf.Graph().as_default():
		session = tf.InteractiveSession()

		# Per-course input placeholders; W1 and b1 form a hidden layer that is
		# shared across all courses.
		xs = []
		ys_ = []
		collectionNames = ["losses_{}".format(i) for i in range(n) ]
		for i in range(n):
			xs.append(tf.placeholder(tf.float32, shape=[None, all_train_x[i].shape[1]]))
			ys_.append(tf.placeholder(tf.float32, shape=[None, all_train_y[i].shape[1]]))
		W1 = makeVariable([all_train_x[0].shape[1],numHidden], stddev=0.5, wd=1e1, name="W1", collectionNames=collectionNames)
		b1 = makeVariable([numHidden], stddev=0.5, wd=1e1, name="b1", collectionNames=collectionNames)
		W2s = []
		level1s = []
		level2s = []
		ys = []
		for i in range(n):
			level1s.append(tf.matmul(xs[i],W1) + b1)
			W2s.append(makeVariable([numHidden,all_train_y[i].shape[1]], stddev=0.5, wd=1e1, name="W2_{}".format(i), collectionNames=["losses_{}".format(i)]))
			level2s.append(tf.matmul(level1s[i],W2s[i]))
			#level2s.append(tf.matmul(tf.nn.relu(level1s[i]), W2s[i]))
			ys.append(tf.nn.softmax(level2s[i]))

		# Per-course losses: cross-entropy plus that course's weight-decay
		# terms, each minimized by its own momentum optimizer with an
		# exponentially decayed learning rate.
		cross_entropies = []
		total_losses = []
		optimizers = []
		global_steps = []
		for i in range(n):
			cross_entropies.append(-tf.reduce_mean(ys_[i]*tf.log(tf.clip_by_value(ys[i],1e-10,1.0)), name="cross_entropy_{}".format(i)))
			tf.add_to_collection("losses_{}".format(i), cross_entropies[i])
			total_losses.append(tf.add_n(tf.get_collection("losses_{}".format(i)), name="total_losses_{}".format(i)))
			global_steps.append(tf.Variable(0, trainable=False))
			learning_rate = tf.train.exponential_decay(0.001, global_steps[i], 1000, 0.98)
			# Pass global_step so the exponential decay above actually advances.
			optimizers.append(tf.train.MomentumOptimizer(learning_rate, momentum=0.1).minimize(total_losses[i], global_step=global_steps[i]))

		session.run(tf.global_variables_initializer())
		for i in range(NUM_EPOCHS):
			if i % 500 == 0:
				print "..."
			for j in range(n):
				offset = i*BATCH_SIZE % (all_train_x[j].shape[0] - BATCH_SIZE)
				optimizers[j].run({xs[j]: all_train_x[j][offset:offset+BATCH_SIZE, :], ys_[j]: all_train_y[j][offset:offset+BATCH_SIZE, :]})
				if i % 500 == 0:
					util.showProgress(total_losses[j], xs[j], ys[j], ys_[j], all_test_x[j], all_test_y[j])
					#util.showProgress(cross_entropies[j], xs[j], ys[j], ys_[j], all_train_x[j], all_train_y[j])
		session.close()
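
# --- Usage sketch (illustrative only; the data shapes and hyperparameters
# are assumptions, and NUM_EPOCHS, BATCH_SIZE, and util.showProgress are
# taken from elsewhere in this repo). runLLL_NN trains one shared hidden
# layer across several related courses, each with its own softmax output
# head; synthetic data stands in for real features here.
if __name__ == '__main__':
	import numpy as np
	rng = np.random.RandomState(0)
	courseIds = [101, 102]  # hypothetical course identifiers
	all_train_x = [rng.rand(500, 20).astype('float32') for _ in courseIds]
	all_train_y = [np.eye(2, dtype='float32')[rng.randint(0, 2, 500)] for _ in courseIds]
	all_test_x = [rng.rand(100, 20).astype('float32') for _ in courseIds]
	all_test_y = [np.eye(2, dtype='float32')[rng.randint(0, 2, 100)] for _ in courseIds]
	runLLL_NN(all_train_x, all_train_y, all_test_x, all_test_y, numHidden=16, courseIds=courseIds)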