Example #1
def rankPop(chromos):
    # evaluate every chromosome and rank the population by fitness
    errors, results = [], []
    # print chromos
    for chromo in chromos:
        # decode the chromosome: bit i set to 1 means keys[i] is selected
        selectedKeys = [keys[i] for i in range(len(chromo)) if chromo[i] == 1]
        selectedKeys.append('"PRICE"')
        scores = myownq.runInnerLoop(selectedKeys, myownq.qAgent())
        # floor non-positive scores at 1 before fitness is computed
        if scores[0] <= 0:
            errors.append(1)
        else:
            errors.append(scores[0])
        results.append(scores)
        # print scores
        # print errors

    fitnessScores = calcFitness(errors)  # calculate fitness scores from the errors collected above
    pairedPop = zip(
        chromos, errors, results, fitnessScores
    )  # pair each chromo with its error, output and fitness score
    rankedPop = sorted(
        pairedPop, key=operator.itemgetter(-1), reverse=True
    )  # sort the paired pop by descending fitness score
    return rankedPop
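
rankPop assumes a calcFitness helper that turns the collected errors into fitness scores; that helper is not shown on this page. A minimal sketch of one common choice, normalized inverse error, offered purely as an assumption about what such a helper could look like (the real implementation may differ):

def calcFitness(errors):
    # hypothetical implementation: inverse-error fitness, normalized so the scores sum to 1
    inverses = [1.0 / e for e in errors]
    total = sum(inverses)
    return [inv / total for inv in inverses]
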
Example #2
						stateDef += [j]
						break
		self.stateDef = tuple(stateDef)
		#print (self.stateDef)
	def getScore(self):
		if (self.newPrice is None or self.curPrice is None):
			return 0.0
		priceChange = float(self.newPrice - self.curPrice)
		return priceChange
	#-1 = predict lower price, 1 = predict higher price
	def getLegalActions(self):
		return [-1,1]
	def getDef(self):
		return self.stateDef

agent = myownq.qAgent()
#The function that runs the inner Q-learning
def run():
	initialized = False
	# open data row by row to avoid memory overflow
	fname = 'data_cleaned.csv'
	with open(fname, 'r+') as f:
		# iterate over the file one line at a time
		date_previous = None 
		ticker_previous = None

		observation = {}
		observationPrevious = {}
		isFirstObservation = True

		lineo = 0
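
The snippet above is truncated before the Q-learning update, but the state class already fixes the interface: getScore() returns the signed price change and getLegalActions() returns the two direction predictions. A hypothetical sketch of how a reward could be derived from those pieces; _DemoState and rewardFor are illustrative names, not part of myownq:

class _DemoState:
	# stand-in with the same getScore() interface as the truncated state class above
	def __init__(self, curPrice, newPrice):
		self.curPrice, self.newPrice = curPrice, newPrice
	def getScore(self):
		if self.newPrice is None or self.curPrice is None:
			return 0.0
		return float(self.newPrice - self.curPrice)

def rewardFor(action, state):
	# action is -1 (predict lower) or 1 (predict higher); reward is the signed price change
	return action * state.getScore()

print(rewardFor(1, _DemoState(10.0, 10.5)))   # 0.5  -> correct "higher" prediction rewarded
print(rewardFor(-1, _DemoState(10.0, 10.5)))  # -0.5 -> wrong direction penalized
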
Example #3
			hashVal = hash(tuple(selectedKeys))
			if (hashVal not in visited):
				repeatedState = False
			if (numRepeatedStates >= 1000):
				STOPIT = True
				break
		if (STOPIT):
			break
	print ("FINAL SOLUTION")
	print (maxState)

#acceptance probability for a worse state (delta < 0); it shrinks as time grows
def annealSchedule(delta, time):
	return math.exp(float(delta) / float(100001 - time)**0.4)

simulatedAgent = myownq.qAgent()

#The function that runs simulated annealing around the inner Q-learning loop
def runSimulatedAnnealing():
	initialized = False
	#no price in these keys
	keys = ['"ACCOCI"', '"ASSETS"', '"ASSETSC"', '"ASSETSNC"', '"BVPS"', '"CAPEX"', '"CASHNEQ"', '"COR"', '"CURRENTRATIO"', '"DE"', '"DEBT"', '"DEPAMOR"', '"DILUTIONRATIO"', '"DPS"', '"EBIT"', '"EBITDA"', '"EBT"', '"EPS"', '"EPSDIL"', '"EQUITY"', '"FCF"', '"FCFPS"', '"GP"', '"INTANGIBLES"', '"INTEXP"', '"INVENTORY"', '"LIABILITIES"', '"LIABILITIESC"', '"LIABILITIESNC"', '"NCF"', '"NCFCOMMON"', '"NCFDEBT"', '"NCFDIV"', '"NCFF"', '"NCFI"', '"NCFO"', '"NCFX"', '"NETINC"', '"NETINCCMN"', '"NETINCDIS"', '"PAYABLES"', '"PB"', '"PREFDIVIS"', '"RECEIVABLES"', '"RETEARN"', '"REVENUE"', '"RND"', '"SGNA"', '"SHARESWA"', '"SHARESWADIL"', '"TANGIBLES"', '"TAXEXP"', '"TBVPS"', '"WORKINGCAPITAL"']
	selectedKeys = random.sample(keys, 10)
	selectedKeys += ['"PRICE"']
	remainingKeys = [key for key in keys if key not in selectedKeys]
	hashVal = hash(tuple(selectedKeys))
	fScoreCorrect = dict()
	fScoreRewards = dict()
	previousState = dict()
	visited = []
	maxState = dict()
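
annealSchedule returns the probability of accepting a worse state (delta < 0), and that probability shrinks as the iteration count grows because the denominator (100001 - time)**0.4 shrinks. A small standalone check of the same formula; the numbers below are illustrative only:

import math

def annealSchedule(delta, time):
	# same formula as above: exp(delta / (100001 - time)**0.4), called with delta < 0
	return math.exp(float(delta) / float(100001 - time) ** 0.4)

# the same bad move (delta = -5) is accepted with shrinking probability as time grows
for t in [1, 50000, 99999]:
	print(t, round(annealSchedule(-5, t), 4))
# prints approximately: 1 0.9512, 50000 0.9362, 99999 0.0226
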
Example #4
def runSimulatedAnnealing():
	initialized = False
	#"PRICE" is not in the module-level keys list, so append it after sampling
	selectedKeys = random.sample(keys, 10)
	selectedKeys += ['"PRICE"']
	remainingKeys = [key for key in keys if key not in selectedKeys]
	hashVal = hash(tuple(selectedKeys))
	fScoreCorrect = dict()
	fScoreRewards = dict()
	previousState = dict()
	visited = []
	maxState = dict()
	maxAgent = None
	testing = False

	#run the iterations
	for x in range(1,100000):
		# if (x % 100 == 0):
		# 	print x
		# if (x < 10):
		result = myownq.runInnerLoop(selectedKeys, myownq.qAgent())
		# else:
		# 	if (not testing):
		# 		#print maxState
		# 		testing = True
		# 	maxAgent = maxState["agent"]
		# 	selectedKeys = maxState["selected"]
		# 	result = myownq.runTestLoop(selectedKeys, maxAgent)
		# 	#print "RESULT"
		# 	#print result
		# 	#print maxState

		fScoreRewards[hashVal] = result[0]
		fScoreCorrect[hashVal] = result[1]
		agent = result[2]
		visited.append(hashVal)

		move = False
		#move if we are on the first step
		if (not previousState):
			move = True
		else:
			delta = fScoreRewards[hashVal] - fScoreRewards[previousState["hash"]]
			#move if we move to a better state
			if (delta >= 0):
				move = True
			#move with a probability defined by the anneal schedule function
			else:
				move = random.random() <= annealSchedule(delta, x)

		if (move):
			previousState["hash"] = hashVal
			previousState["selected"] = selectedKeys
			previousState["remaining"] = remainingKeys
			# print (fScoreRewards[hashVal])
			# print (fScoreCorrect[hashVal])
			# print x
			#track the best-scoring state seen so far
			if (not maxState or fScoreRewards[hashVal] > maxState["score"]):
				maxState["hash"] = hashVal
				maxState["selected"] = selectedKeys
				maxState["remaining"] = remainingKeys
				maxState["score"] = fScoreRewards[hashVal]
				maxState["correct"] = fScoreCorrect[hashVal]
				maxState["agent"] = agent

		repeatedState = True
		numRepeatedStates = 0
		#avoid repeating states
		while (repeatedState):
			numRepeatedStates += 1
			hashVal = previousState["hash"]
			selectedKeys = previousState["selected"]
			remainingKeys = previousState["remaining"]
			#either randomly remove a fundamental, or add one
			#don't want too few fundamentals, so only remove if there are more than 4
			#also don't want too many, so limit it at like 45
			if ((len(selectedKeys) > 4 and util.flipCoin(0.5)) or len(selectedKeys) > 45):
				randomKey = random.sample(selectedKeys, 1)[0]
				while (randomKey == '"PRICE"'):
					randomKey = random.sample(selectedKeys, 1)[0]
				selectedKeys.remove(randomKey)
				remainingKeys.append(randomKey)
			else:
				randomKey = random.sample(remainingKeys, 1)[0]
				selectedKeys.append(randomKey)
				remainingKeys.remove(randomKey)

			hashVal = hash(tuple(selectedKeys))
			if (hashVal not in visited):
				repeatedState = False
			if (numRepeatedStates >= 1000):
				#too many repeated states: restart from a fresh random sample and leave the loop,
				#otherwise the next iteration would overwrite the new sample with previousState
				selectedKeys = random.sample(keys, 10)
				selectedKeys += ['"PRICE"']
				remainingKeys = [key for key in keys if key not in selectedKeys]
				hashVal = hash(tuple(selectedKeys))
				break

	# print ("FINAL SOLUTION")
	print (maxState)
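
The annealing loop above calls util.flipCoin, which is not shown on this page. Assuming it follows the usual convention of returning True with probability p, a drop-in stand-in would look like the sketch below (an assumption, not the original util module):

import random

def flipCoin(p):
	# stand-in for util.flipCoin: returns True with probability p
	return random.random() < p

# with p = 0.5 the annealing move removes or adds a fundamental about half the time each,
# which is how the loop above uses it
print(sum(flipCoin(0.5) for _ in range(10000)) / 10000.0)  # close to 0.5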