Example #1
        print(i)

    cat = Cat("kitty")
    cat.add_trick("meow!!")
    dog = Dog("puppy")
    dog.add_trick("ruff")

    print(cat.get_name(), "abc")

    print(dog.get_name() + " " + str(dog.get_tricks()))

    i = 0

    print " 10 mod 3 =" + str(10 / 3)

    dataSet = readDataSets('/home/grant/Downloads/phd_data/AMC2017-2018.csv')

    print(dataSet.nextBatch(3))

    print(dataSet.nextBatch(3))

    tradingData = readDataSets(
        '/home/grant/Downloads/phd_data/AMC2000-2012.csv')

    batch_xs, batch_ys = tradingData.nextLstmBatch(100)

    print "nextLstmBatch"

    print batch_xs

    print batch_ys
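
These snippets call readDataSets and nextBatch/nextLstmBatch without showing the loader. A minimal sketch of the interface they appear to assume, with hypothetical internals (the real loader, and the sequence-ordered nextLstmBatch variant, are not shown):

import csv

class DataSet(object):
    """Sequential (features, labels) batch reader over a price CSV."""

    def __init__(self, xs, ys):
        self._xs, self._ys = xs, ys
        self._pos = 0

    def nextBatch(self, n):
        # return (features, labels) for the next n examples, advancing a cursor
        s = slice(self._pos, self._pos + n)
        self._pos += n
        return self._xs[s], self._ys[s]

def readDataSets(path):
    # placeholder parse: the real loader presumably builds a 100-price feature
    # window per example and a 0/1 "price rose" label from the CSV rows
    with open(path) as f:
        rows = list(csv.reader(f))
    xs = [row[:-1] for row in rows]
    ys = [row[-1] for row in rows]
    return DataSet(xs, ys)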
Example #2
    model.compile(loss='mean_absolute_error',
                  optimizer=myOptimizer,
                  metrics=['accuracy', mean_pred, earning_pred])

    return model


stocks = ['AMC', 'ANZ', 'BHP', 'CBA', 'NAB', 'GPT', 'CIM', 'CCL']

priceIncrease = 1.005

for stock in stocks:
    print(stock)

    tradingData = readDataSets(
        '/home/grant/Downloads/phd_data/' + stock + '2000-2012.csv',
        priceIncrease)

    epochSize = 60
    batchSize = 50
    batch_xs, batch_ys = tradingData.nextBatch(batchSize * epochSize)

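    # reshape the flat batch into epochSize sub-batches of batchSize examples,
    # each a 100-value feature window paired with a single label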
    batch_xs = numpy.reshape(batch_xs, [epochSize, batchSize, 100])
    batch_ys = numpy.reshape(batch_ys, [epochSize, batchSize, 1])

    # print(batch_ys)

    model = buildModel()

    history = model.fit(batch_xs,
                        batch_ys)
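
Example #2's compile call references two custom metrics, mean_pred and earning_pred, whose definitions are not included in the snippet. A Keras custom metric is just a callable taking (y_true, y_pred) tensors and returning a tensor; a minimal sketch of what mean_pred could plausibly look like (an assumption, not the author's definition):

from keras import backend as K

def mean_pred(y_true, y_pred):
    # average prediction over the batch; handy for spotting a model that has
    # collapsed to always predicting the same class
    return K.mean(y_pred)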
Example #3
import numpy
import tensorflow as tf

# readDataSets (the CSV loader used below) is assumed to be defined elsewhere.


def main(_):

	# AMC, ANZ, BHP, CBA, NAB, GPT, CIM, CCL
	stock = "CBA"
	
	# Import data
	tradingData = readDataSets('/home/grant/Downloads/phd_data/' + stock + '2000-2012.csv')
	
	# Create the model
	x = tf.placeholder(tf.float32, [None, 100], "tradingPrices")
	W = tf.Variable(tf.zeros([100, 1]), name='priceWeight')
	b = tf.Variable(tf.zeros([1]), name='bias')
	y = tf.sigmoid(tf.matmul(x, W) + b)

	# Define loss and optimizer
	y_ = tf.placeholder(tf.float32, [None, 1], "probHigherPrice")

	# The raw formulation of cross-entropy,
	#
	#   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y)),
	#                                 reduction_indices=[1]))
	#
	# can be numerically unstable, so the loss used here is instead the
	# summed absolute error between the target y_ and the prediction y.
	# cross_entropy = tf.losses.mean_squared_error(y_, y)
	# cross_entropy = tf.losses.softmax_cross_entropy(y_, y)
	cross_entropy = tf.reduce_sum(tf.abs(tf.subtract(y_, y)))
	train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
	
	correct_prediction = tf.equal(tf.cast(tf.round(y), tf.int32), tf.cast(tf.round(y_), tf.int32))
	accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
	
	tf.summary.scalar('accuracy', accuracy)
	tf.summary.scalar('cross_entropy', cross_entropy)
	merged = tf.summary.merge_all()
	
	with tf.Session() as sess:
		train_writer = tf.summary.FileWriter('/tmp/train', sess.graph)
		tf.global_variables_initializer().run()
		# Train
		for i in range(60):
			batchSize = 50
			batch_xs, batch_ys = tradingData.nextBatch(batchSize)
			batch_xs = numpy.reshape(batch_xs, [batchSize, 100])
			batch_ys = numpy.reshape(batch_ys, [batchSize, 1])
			sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
			summary, entropy = sess.run([merged, cross_entropy], feed_dict={x: batch_xs, y_: batch_ys})
			# print(entropy)
			# print(sess.run(cross_entropy, feed_dict={x: batch_xs, y_: batch_ys}))
			train_writer.add_summary(summary, i)
		train_writer.close()

		# Test the trained model on held-out 2013-2017 data
		
		print(W.eval())
		testData = readDataSets('/home/grant/Downloads/phd_data/' + stock + '2013-2017.csv')
		test_xs, test_ys = testData.nextBatch(200)
		test_xs = numpy.reshape(test_xs, [200, 100])
		test_ys = numpy.reshape(test_ys, [200, 1])
		# print(test_xs)
		# print(sess.run(tf.round(test_ys)))
		
		print(sess.run(tf.abs(y), feed_dict={x: test_xs, y_: test_ys}))
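
The comment in Example #3 explains why cross-entropy computed on the squashed outputs is numerically unstable; the snippet sidesteps the issue by summing absolute error instead. If cross-entropy were wanted for this single sigmoid output, the stable TF 1.x formulation applies tf.nn.sigmoid_cross_entropy_with_logits to the pre-sigmoid logits. A sketch under that assumption (not part of the original):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 100], "tradingPrices")
W = tf.Variable(tf.zeros([100, 1]), name='priceWeight')
b = tf.Variable(tf.zeros([1]), name='bias')
logits = tf.matmul(x, W) + b  # keep the raw logits
y = tf.sigmoid(logits)        # probability the price rises
y_ = tf.placeholder(tf.float32, [None, 1], "probHigherPrice")

# stable: the sigmoid and the log terms are combined internally
cross_entropy = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=y_, logits=logits))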