def FileRead(addr):
    """Parse a search-problem description file and build a Game object."""
    with open(addr, 'r') as f:  # `with` closes the file; the original handle was never closed
        fileString = f.read()
    print("file being read")
    stringList = fileString.split('\n')

    states = []
    searchMode = int(stringList[0])
    print('search mode', searchMode)
    print(stringList[1], 'is looking for', stringList[2])
    sourceName = stringList[1]
    targetName = stringList[2]
    stateCount = int(stringList[3])
    print('total number of states', stateCount)

    # One state name per line.
    for i in range(4, 4 + stateCount):
        temp = State()
        temp.name = stringList[i]
        states.append(temp)

    # The next stateCount lines form an adjacency matrix; a nonzero entry
    # is the cost of the edge between the two states.
    for i in range(stateCount):
        row = stringList[4 + stateCount + i].split(' ')
        for j in range(stateCount):
            if int(row[j]) != 0:
                states[i].connections.append((states[j], int(row[j])))
                print(states[i].name, 'is friends with', states[j].name,
                      'with the cost of', row[j])

    game = Game()
    game.states = states
    game.Mode = searchMode
    game.root = sourceName
    game.target = targetName
    return game
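# For illustration only: a hypothetical input file in the layout FileRead
# expects -- search mode, source name, target name, state count, one state
# name per line, then a stateCount x stateCount cost matrix (0 = no edge).
# The names and costs below are made up.
#
#   1
#   A
#   C
#   3
#   A
#   B
#   C
#   0 4 0
#   4 0 2
#   0 2 0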
def init_game_alphabeta(grid=1):
    # Build the grid (the argument selects which grid to create) and
    # place the player at its start location.
    grid = gr.create_grid(grid)
    start_location = gr.get_start_location()
    game = Game(grid, start_location)
    score, moves = alphabeta(game)
    if score < game.max_steps():
        print("Score:", score)
    else:
        print("Score: infinity")
    return score, moves
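# alphabeta() itself is not shown in this snippet. For reference, a minimal
# generic sketch of alpha-beta pruning over a hypothetical game interface;
# children(), is_terminal(), and evaluate() are assumed names, not part of
# the original code, and the real alphabeta(game) also returns the moves.
def alphabeta_sketch(node, depth, alpha=float('-inf'), beta=float('inf'), maximizing=True):
    if depth == 0 or node.is_terminal():
        return node.evaluate()
    if maximizing:
        value = float('-inf')
        for child in node.children():
            value = max(value, alphabeta_sketch(child, depth - 1, alpha, beta, False))
            alpha = max(alpha, value)
            if alpha >= beta:
                break  # beta cutoff: the minimizer will never allow this line
        return value
    else:
        value = float('inf')
        for child in node.children():
            value = min(value, alphabeta_sketch(child, depth - 1, alpha, beta, True))
            beta = min(beta, value)
            if alpha >= beta:
                break  # alpha cutoff: the maximizer already has a better option
        return value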
def correlate_all_data():
    sp500Game = Game()
    sp500Game.load_stocks('under10')

    # Per-stock mean closing price and mean volume.
    averagePrices = np.array(
        [stats.mean(sp500Game.allStocks[stock].dataPerTime['close'])
         for stock in sp500Game.allStocks], dtype=np.float64)
    averageVolumes = np.array(
        [stats.mean(sp500Game.allStocks[stock].dataPerTime['volume'])
         for stock in sp500Game.allStocks], dtype=np.float64)

    # Slope of the best-fit line through each stock's closing prices.
    bestFitSlopes = np.array(
        [best_fit_slope(
            np.array(list(range(len(sp500Game.allStocks[stock].dataPerTime['close']))),
                     dtype=np.float64),
            np.array(sp500Game.allStocks[stock].dataPerTime['close'],
                     dtype=np.float64))
         for stock in sp500Game.allStocks], dtype=np.float64)

    print(np.corrcoef(averagePrices, averageVolumes))
    print(np.corrcoef(averagePrices, bestFitSlopes))
    print(np.corrcoef(averageVolumes, bestFitSlopes))
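# Note: np.corrcoef returns the full 2x2 correlation matrix; the Pearson
# coefficient itself is the off-diagonal entry, e.g.
# np.corrcoef(averagePrices, averageVolumes)[0, 1].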
def __init__(self, starting_tech_positions, colonies, reserve=200):
    Game.__init__(self, starting_tech_positions, colonies, reserve)
    # Variables to store possible actions from the current game state.
    self.col_distributions_list = []
    self.building_choices_list = []
    self.res_choices_list = []
    # Compute the initial actions.
    self.building_choices()
    self.colonist_distributions()
    self.research_choices()
def play_game():
    theGame = Game()
    theGame.name = input("What is your name? ")
    theGame.load_stocks(input("What type of stocks? "))
    theGame.next_day()
    # Keep prompting for actions until the player enters '4' to quit.
    print('What would you like to do,', theGame.name, '?')
    playerInput = input()
    while playerInput != '4':
        theGame.action(playerInput)
        print('What would you like to do,', theGame.name, '?')
        playerInput = input()
######################################################################################################
def display_data(stock_type):  # renamed from `type` to avoid shadowing the builtin
    """Just for visual practice: plot closes, a rolling mean, and a best-fit line."""
    sp500Game = Game()
    sp500Game.load_stocks(stock_type)
    numOfGraphs = 6
    i = 1
    for stock in sp500Game.allStocks:
        closes = sp500Game.allStocks[stock].dataPerTime['close']
        # Rolling mean over the last meanRange minutes; before enough samples
        # exist, fall back to the cumulative mean so far.
        daMean = []
        meanRange = 500
        for counter in range(1, len(closes) + 1):
            if counter >= meanRange:
                daMean.append(stats.mean(closes[counter - meanRange:counter]))
            else:
                daMean.append(stats.mean(closes[0:counter]))
        xs = np.array(list(range(len(closes))), dtype=np.float64)
        ys = np.array(closes, dtype=np.float64)
        m = best_fit_slope(xs, ys)
        b = best_fit_intercept(xs, ys, m)
        bestFitLine = [(m * x) + b for x in xs]
        plt.figure(i)
        plt.plot(bestFitLine, 'g')  # green, so the fit line is distinguishable from the rolling mean
        plt.plot(closes, 'r')
        plt.plot(daMean, 'b')
        plt.ylabel('Price')
        plt.xlabel('Minutes')
        plt.title(stock + " " +
                  str(int(stats.mean(sp500Game.allStocks[stock].dataPerTime['volume']))))
        # Show six figures at a time.
        if i == numOfGraphs:
            plt.show()
            i = 1
        else:
            i += 1
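# Hypothetical helper, not part of the original code: the same rolling mean
# via numpy. np.convolve with a uniform kernel averages each w-minute window;
# 'valid' mode drops the warm-up region that the loop above fills with
# cumulative means, so the output is shorter by w - 1 samples.
def rolling_mean_sketch(ys, w=500):
    return np.convolve(np.asarray(ys, dtype=np.float64), np.ones(w) / w, mode='valid')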
def load_data(filename="tagged_nfl_game_rows.csv"):
    all_data = []
    # Open in text mode; csv.DictReader in Python 3 expects str, not bytes.
    with open(filename, 'r', newline='') as csvfile:
        reader = csv.DictReader(csvfile, delimiter=',', quotechar='|')
        for row in reader:
            # Coerce each field: empty -> None, digits -> int,
            # and the Over/Under column -> float.
            for i in row.keys():
                if len(row[i]) == 0:
                    row[i] = None
                elif row[i].isdigit():
                    row[i] = int(row[i])
                elif i == "Over/Under":
                    row[i] = float(row[i][0:4])
            temp = Game(row["tag"], row)
            all_data.append(temp)
    return all_data
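# For illustration only -- the real column set is not shown here, but from
# the parsing above the file at least has a "tag" column and an "Over/Under"
# column whose first four characters parse as a float. The values below are
# made up; other columns are elided.
#
#   tag,Over/Under,...
#   win,47.5,...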
import pygame
from GameClass import Game

pygame.init()  # pygame.init() already initializes the font module
new_game = Game()
new_game.game_loop()
pygame.quit()
# ***********************************************************************
# *********** CHANGE NAVIGATOR FROM GREEDY TO INFORMED HERE *************
# ***********************************************************************
# Choose which navigator to use
which_navigator = 1  # 0 = greedy_navigator; 1 = informed_navigator
plot_trajectories = False

if which_navigator == 0:
    navigator = GreedyNavigator()
    plot_title = 'Greedy Navigator trajectory'
else:
    navigator = InformedNavigator()
    plot_title = 'Informed Navigator trajectory'

# Run ten trials, each on a fresh map. Each trial runs the game for up to
# 1000 ticks, stopping early if the goal is found.
all_scores = []
all_times = []
for trial in range(10):
    start_time = time.time()
    map.getNewMap()
    data = map.map
    navigator.reset()
    robot.resetRobot()
    # Create a Game object, providing it with the map data, the goal
    # location of the map, the navigator, and the robot.
    game = Game(data, map.number, navigator, robot)
    for x in range(0, 1000):
        found_goal = game.tick()
        print(f"{game.getIteration()}: Robot at: {robot.getLoc()}, Score = {game.getScore()}")
# Goal location depends on which map is loaded.
if 0 <= map.number <= 2:
    goalLoc = (0, 27)
    wrongGoals = [[27, 27], [27, 0]]
elif 3 <= map.number <= 5:
    goalLoc = (27, 27)
    wrongGoals = [[0, 27], [27, 0]]
elif 6 <= map.number <= 9:
    goalLoc = (27, 0)
    wrongGoals = [[0, 27], [27, 27]]

game = Game(data, goalLoc, navigator, robot, temprobot)
# Each tick takes one action in the world, updates the explored area of
# the map, and reports whether the goal has been reached.
'''
The robot looks one step ahead, and the map is explored and masked at the
corresponding positions. The neural network predicts the full map from the
masked, explored map, and the robot moves in the direction that yields the
most information among all valid moves. Pixel values at the next step lie
between 0 and 255.
'''
uNet = WorldEstimatingNetwork()
classNet = DigitClassifcationNetwork()
reward = 0
mask = np.zeros((28, 28))
tempmask = np.zeros((28, 28))
# Initialize movement to update the mask and exploredMap
sub_plt = fig.add_subplot(111)
all_goals = [[0, 27], [27, 27], [27, 0]]

# Stack the map into an RGB image for display.
rgb = np.zeros((28, 28, 3), 'uint8')
rgb[..., 0] = data
rgb[..., 1] = data
rgb[..., 2] = data
plt_handler = sub_plt.imshow(rgb, cmap='gray')

reward_mask_map = np.zeros((28, 28))
reward_mask_map[0, 0] = -1
mask_map[0, 0] = 1

# Get the navigator class the robot uses to move on the map.
navigator = RandomNavigator()
# Create the main objects for this game.
game = Game(data, goal, navigator, robot)

# Sweep the robot up and right to seed the explored mask.
for i in range(14):
    for j in range(1):
        act = 'up'
        robot.move(act)
        mask_map[robot.getLoc()[1], robot.getLoc()[0]] = 1
        act = 'right'
        robot.move(act)
        mask_map[robot.getLoc()[1], robot.getLoc()[0]] = 1

while True:
    best_info_qual = np.min(reward_mask_map) - 100
    best_action = ''
    # Estimate the world from what has been explored so far, classify it,
    # and move toward the goal if the estimate is already available.
    image = uNet.runNetwork(data, mask_map)
    char = classNet.runNetwork(image)[0]
from GameClass import Game
from RandomNavigator import RandomNavigator
from networkFolder.functionList import Map, WorldEstimatingNetwork, DigitClassifcationNetwork
# Assumed imports (not shown in the original snippet): Robot, PIL's Image,
# and numpy are all used below; the Robot module name is a guess.
from RobotClass import Robot
from PIL import Image
import numpy as np

map = Map()
data = map.map
print(map.number)

robot = Robot(0, 0)
navigator = RandomNavigator()
game = Game(data, (27, 27), navigator, robot)

for x in range(0, 1000):
    print(robot.xLoc, robot.yLoc)
    game.tick()

# Show how much of the world has been explored.
im = Image.fromarray(np.uint8(game.exploredMap)).show()

uNet = WorldEstimatingNetwork()
classNet = DigitClassifcationNetwork()
from GameClass import Game
import pygame

g = Game()
g.show_start_screen()
while g.running:
    g.new()
    g.show_go_screen()
pygame.quit()
import pygame
from GameClass import Game
import time
from threading import Thread

pygame.init()
GAME_START: bool = False  # annotated bool; the original annotation said int

game = Game(False, 10, 60)
game.start()
game.bg_screen_draw()
game.on_start()
pygame.display.update()

# Run the game clock on its own thread.
clock_thread = Thread(target=game.clock_thread)
clock_thread.start()

def main():
    try:
        while not GAME_START:
            time.sleep(0.01)
            event_list = pygame.event.get()
            for event in event_list:
                if event.type == pygame.QUIT:
                    print("Stop")
                    game.stop_event_listener = True
                    game.stop_thread = True
                    pygame.quit()
                    quit()
                if event.type == pygame.KEYDOWN:
                    keys_p = pygame.key.get_pressed()
                    if keys_p[pygame.K_ESCAPE]:
from ColonyClass import Colony, Planet
from GameClass import Game
from GUI import GUI

# Initialize game object
p1 = Planet('large', 'abundant', 'normal', 'tundra')
c1 = Colony(p1, 'Ecber II', 2, 1, 1, ['hydroponicFarm', 'marineBarracks'])
p2 = Planet('large', 'abundant', 'normal', 'desert')
c2 = Colony(p2, 'Fahd II', 2, 1, 1, ['marineBarracks', 'hydroponicFarm'])
p3 = Planet('medium', 'abundant', 'normal', 'terran')
c3 = Colony(p3, 'Mentar IV', 2, 3, 3, [
    'automatedFactory', 'hydroponicFarm', 'biospheres', 'marineBarracks',
    'soilEnrichment'
])

starting_tech_positions = [('construction', 6), ('chemistry', 3),
                           ('sociology', 2), ('computers', 3), ('biology', 2)]
game = Game(starting_tech_positions, [c1, c2, c3], reserve=200, stored_rp=0)

# Initialize GUI
gui = GUI(game)
            x.insert((i + 1), (leftX + j))
            y.insert((i + 1), realPoint)
            break
        i += 1
    return x, y
######################################################################################################

# global myClassifier
# global kerasClassifier
# Note: the normalize= kwarg was removed from LinearRegression in scikit-learn 1.2;
# this call only works on older versions.
myClassifier = LinearRegression(fit_intercept=True, normalize=True, n_jobs=-1)
# kerasClassifier = Sequential()

storage = Game()
storage.load_stocks('under10')
forecast_out = 50
testRange = 15
for stock in storage.allStocks:
    # predictions = keras_train_classifier(storage.allStocks[stock].dataPerTime, kerasClassifier, forecast_out)
    predictions = train_classifier(storage.allStocks[stock].dataPerTime, myClassifier, forecast_out)
    # plt.plot(predictions, 'b')
    plt.plot(storage.allStocks[stock].dataPerTime['close'], 'b')
    # x, y = find_low_points(storage.allStocks[stock])
    # j, k = find_high_points(storage.allStocks[stock])
    # x, y = lowest_ranged(x, y, storage.allStocks[stock].dataPerTime['close'], testRange)
    # j, k = highest_ranged(j, k, storage.allStocks[stock].dataPerTime['close'], testRange)
    # plt.plot(x, y, 'r')
def run(params):
    pygame.init()
    agent = DQNAgent(params)
    print_info(params)

    # Optionally resume from saved weights.
    weights_filepath = params['weights_path']
    if params['load_weights']:
        agent.model.load_weights(weights_filepath)
        print("Weights Loaded")
    else:
        print("Training From Scratch...")

    counter_games = 0
    score_plot = []
    counter_plot = []
    record = 0
    while counter_games < params['episodes']:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()

        # Class objects
        game = Game(params)
        player1 = game.player
        food1 = game.food
        total_reward = -150
        total_reward2 = 0

        # First move
        initialize_game(player1, game, food1, agent, params['batch_size'], counter_games)
        if params['display']:
            display(player1, food1, game, record, counter_games, total_reward, params)

        while not game.crash:
            if not params['train']:
                agent.epsilon = 0
            else:
                # agent.epsilon decays linearly to give randomness to early actions.
                agent.epsilon = 1 - (counter_games * params['epsilon_decay_linear'])

            # Old state
            state_old = agent.get_state(game, player1, food1, params)

            # Explore with a random action, or exploit the model's prediction.
            # uniform(0, 1) gives a proper epsilon-greedy draw; the original
            # randint(0, 1) only produced 0 or 1 (assumes `from random import uniform`).
            if uniform(0, 1) < agent.epsilon and not params['load_weights']:
                final_move = to_categorical(randint(0, 2), num_classes=3)
            else:
                prediction = agent.model.predict(state_old.reshape((1, 20)))
                final_move = to_categorical(np.argmax(prediction[0]), num_classes=3)

            player1.do_move(final_move, player1.x, player1.y, game, food1, agent)
            state_new = agent.get_state(game, player1, food1, params)
            reward = agent.set_reward(player1, game.crash, food1, counter_games, final_move)
            total_reward = round(total_reward + reward, 2)
            total_reward2 = round(total_reward2 + reward, 2)

            if params['train']:
                # Train short-term memory on the new action and state,
                # then store the transition in long-term replay memory.
                agent.train_short_memory(state_old, final_move, reward, state_new, game.crash)
                agent.remember(state_old, final_move, reward, state_new, game.crash)

            record = get_record(game.score, record)
            if params['display']:
                display(player1, food1, game, record, counter_games, total_reward, params)

        if params['train']:
            agent.replay_new(agent.memory, params['batch_size'])
        counter_games += 1
        print(f'Game {counter_games} Score: {game.score} Reward: {total_reward2}')
        score_plot.append(game.score)
        counter_plot.append(counter_games)

        # Save a checkpoint every `cf` games.
        if counter_games % params['cf'] == 0:
            n = int(counter_games / params['cf'])
            agent.model.save_weights(params['cp'] + str(n) + ".hdf5")
            print("Checkpoint Saved...")

    plot_seaborn(counter_plot, score_plot)
    pygame.quit()
    quit()
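# For illustration only: a hypothetical params dict covering every key that
# run() reads. Only the key names come from the code above; the values and
# paths are made up.
example_params = {
    'weights_path': 'weights/latest.hdf5',   # hypothetical path
    'load_weights': False,
    'train': True,
    'display': False,
    'episodes': 150,
    'batch_size': 500,
    'epsilon_decay_linear': 1 / 100,
    'cf': 25,              # checkpoint frequency, in games
    'cp': 'checkpoints/',  # checkpoint filename prefix
}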
data = map.map
# Print the number of the current map
print(map.number)

# Create a Robot that starts at (0,0).
# The Robot class stores the current position of the robot
# and provides ways to move it.
robot = Robot(0, 0)

# The RandomNavigator class makes the robot move in random directions.
# TODO: You will want to write other navigators with different exploration strategies.
navigator = RandomNavigator()

# Create a Game object, providing it with the map data, the goal location
# of the map, the navigator, and the robot.
game = Game(data, map.number, navigator, robot)

# This loop runs the game for 1000 ticks, stopping if a goal is found.
for x in range(0, 1000):
    found_goal = game.tick()
    print(f"{game.getIteration()}: Robot at: {robot.getLoc()}, Score = {game.getScore()}")
    if found_goal:
        print(f"Found goal at time step: {game.getIteration()}!")
        break
print(f"Final Score: {game.score}")

# Show how much of the world has been explored.
im = Image.fromarray(np.uint8(game.exploredMap)).show()
def main():
    option = "simple"
    M = Map(0)
    data = M.map
    robot = Robot(0, 0)
    navigator = greedy(robot)
    real_goal = get_goal(M.number)
    game = Game(data, real_goal, navigator, robot)
    print("actual goal", real_goal)
    print("actual number:", M.number)

    # Define the networks.
    wEstNet = WorldEstimatingNetwork()
    classNet = DigitClassifcationNetwork()
    mask = np.zeros((28, 28))
    path_taken = np.zeros((28, 28))
    rewards = 0

    # Exploration phase: tick greedily until the digit classifier is
    # confident (> 0.95) about which map the robot is on.
    while True:
        # Record the path taken by the robot.
        path_taken[robot.getLoc()[0], robot.getLoc()[1]] = 1
        # Mark every explored (non-gray) cell in the mask.
        for x in range(0, 28):
            for y in range(0, 28):
                if game.exploredMap[x, y] != 128:
                    mask[x, y] = 1
        image = wEstNet.runNetwork(game.exploredMap, mask)
        char = classNet.runNetwork(image)  # softmax output values
        prob = calc_prob(char)
        print("prob:", prob)
        print("current prediction", char.argmax())
        if prob > 0.95:
            break
        new_image = image * (1 - mask)
        run, rewards = game.tick_greedy(new_image, option, rewards)
        if run:
            break

    # Navigation phase: head for the goal implied by the predicted digit.
    print("robot finds path")
    robot_goal = get_goal(char.argmax())
    prevLoc = np.array([robot.getLoc()[0], robot.getLoc()[1]])
    while True:
        check_goal, rewards = game.tick(robot_goal, prevLoc, rewards)
        if check_goal:
            print("prediction is right")
            print("rewards", rewards)
            break
        else:
            print("rewards", rewards)
            print("WRONG DESTINATION")
        # Check whether the robot is moving towards the right goal.
        prevLoc = np.array([robot.getLoc()[0], robot.getLoc()[1]])
def game():
    game = Game()
    game.create_frame()
    yield game
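# The generator above looks like a pytest fixture: the yield hands the Game
# to a test, and any code after the yield would run as teardown. A minimal
# sketch of how it would be registered and consumed, assuming pytest is in
# use -- the decorator and test below are illustrative, not from the
# original code:
#
#   import pytest
#
#   @pytest.fixture
#   def game():
#       game = Game()
#       game.create_frame()
#       yield game
#
#   def test_frame_is_created(game):
#       assert game is not None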
while again.lower() == "y":
    difficulty = input("Please enter the difficulty of the game. (Easy: e / Medium: m / Hard: h) ")
    while difficulty.lower() not in ["e", "m", "h"]:
        difficulty = input("Sorry, I didn't catch that. Please enter again. (Easy: e / Medium: m / Hard: h) ")
    # Magic boxes are only offered on medium and hard difficulty.
    if difficulty.lower() in ["m", "h"]:
        bonus = input("Would you like to activate magic boxes in this game? (Yes: y / No: n) ")
        while bonus.lower() not in ["y", "n"]:
            bonus = input("Sorry, I didn't catch that. Please enter again. (Yes: y / No: n) ")
    else:  # easy difficulty never has magic boxes
        bonus = "n"
    print("\n")
    game = Game(difficulty.lower(), bonus.lower())
    game.gameSetup()
    again = input("Would you like to try again? (Yes: y / No: n) ")
    while again.lower() not in ["y", "n"]:
        again = input("Sorry, I didn't catch that. Please enter again. (Yes: y / No: n) ")