Example #1
    def __init__(self,
                 game,
                 model,
                 action_range,
                 field,
                 memory=None,
                 memory_size=1000,
                 nb_frames=None,
                 nb_epoch=1000,
                 batch_size=50,
                 gamma=0.9,
                 epsilon_range=[1., .01],
                 epsilon_rate=0.99,
                 reset_memory=False,
                 observe=0,
                 checkpoint=None):
        self.model = model
        self.game = game
        self.field = field
        self.memory_size = memory_size
        self.nb_frames = nb_frames
        self.nb_epoch = nb_epoch
        self.batch_size = batch_size
        self.gamma = gamma
        self.epsilon_range = epsilon_range
        self.epsilon_rate = epsilon_rate
        self.reset_memory = reset_memory
        self.observe = observe
        self.checkpoint = checkpoint
        self.action_range = action_range
        self.loss = 0
        self.score_last_games = []
        self.ma_score_list = []

        self.replay = Replay(self.field, self.memory_size, gamma=self.gamma)
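A note on how these arguments are typically consumed (the training loop is not part of this snippet, so the exact semantics are an assumption): epsilon is annealed from epsilon_range[0] down to epsilon_range[1] over roughly epsilon_rate * nb_epoch epochs, along the lines of:

        # hypothetical per-epoch annealing step, matching the fields above
        # delta = (self.epsilon_range[0] - self.epsilon_range[1]) / (self.nb_epoch * self.epsilon_rate)
        # epsilon = max(self.epsilon_range[1], epsilon - delta)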
Example #2
    def __init__(self, task):
        # Hyperparameters
        self.learning_rate_actor = 1e-4
        self.learning_rate_critic = 1e-3
        self.gamma = 0.99
        self.tau = 0.001

        # Define net
        self.sess = tf.Session()
        self.task = task
        self.actor = ActorNet(self.sess, self.task.state_size, self.task.action_size, self.learning_rate_actor,
                              self.task.action_low, self.task.action_high, self.tau)
        self.critic = CriticNet(self.sess, self.task.state_size, self.task.action_size, self.learning_rate_critic, self.tau)

        # Define noise
        self.mu = 0
        self.theta = 0.15
        self.sigma = 0.20
        self.noise = OUNoise(self.task.action_size, self.mu, self.theta, self.sigma)

        # Define memory replay
        self.buffer_size = 1000000
        self.batch_size = 64
        self.memory = Replay(self.buffer_size, self.batch_size)

        # Score
        self.best_score = -np.inf
        self.best_reward = -np.inf
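For context, tau above is the soft-update coefficient used for DDPG target networks; a minimal sketch of the update it presumably drives inside ActorNet/CriticNet (their internals are not shown here, so this is an assumption):

        # soft target update, applied after each learning step:
        #   theta_target <- tau * theta_local + (1 - tau) * theta_target
        # for target_w, local_w in zip(target_vars, local_vars):
        #     update_ops.append(target_w.assign(self.tau * local_w + (1.0 - self.tau) * target_w))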
Example #3
    def replay_button_interaction(self):
        mouse_pos = pygame.mouse.get_pos()
        if (self.replay_button.coords[0] < mouse_pos[0] < self.replay_button.coords[0] + self.replay_button.dimensions[0] and
                self.replay_button.coords[1] < mouse_pos[1] < self.replay_button.coords[1] + self.replay_button.dimensions[1]):
            self.replay_button.button_light(self.screen, (125, -3))
            mouse_click = pygame.mouse.get_pressed()
            if mouse_click[0] == 1:
                questions = ['Sensor size', 'Replay_data path']
                input_box = InputBoxMenu(self.screen, len(questions),
                                         (self.replay_button.coords[0] + 25, self.replay_button.coords[1] + 75),
                                         questions, [int, 'path + csv'])
                input_box.help()
                inputs = input_box.ask_boxes()
                check = input_box.check_inputbox_input()
                error_message_pos = [20, 20]

                while check in input_box.errors:
                    self.display_error_message('Error ' + check, position=tuple(error_message_pos), sleep_time=0)
                    error_message_pos[1] += 40
                    inputs = input_box.ask_boxes()
                    check = input_box.check_inputbox_input()

                replay = Replay(self.screen, self.screen_width, self.screen_height,
                                activations=self.activation_cbox.isChecked(),
                                traffic=self.traffic_cbox.isChecked(),
                                sensors=self.sensors_cbox.isChecked(),
                                distance_sensor=self.distance_sensor_cbox.isChecked(),
                                sensor_size=int(inputs[0]),
                                enabled_menu=True)
                replay.replay(inputs[1], enable_trajectory=True)
                quit()
        else:
            self.replay_button.draw_button(self.screen, (125, -3))
Example #4
    def post(self):
        upload_files = self.get_uploads(
            'file')  # 'file' is file upload field in the form
        if not upload_files:
            self.redirect('/failed/nofile/')
            return
        blob_info = upload_files[0]
        key = blob_info.key()
        if blob_info.size > 1048576:
            blob_info.delete()
            self.redirect('/failed/sizeerror/%s' % blob_info.filename)
            return
        blob_reader = blobstore.BlobReader(key)
        magic = blob_reader.read(50)
        if magic[0:3] != "MPQ" or not "StarCraft II replay" in magic:
            blob_info.delete()
            self.redirect('/failed/typeerror/%s' % blob_info.filename)
            return

        replayid = counter_as_string('principal')
        increment('principal')

        m = md5()
        blob_reader.seek(0)  # rewind past the 50 bytes consumed by the magic check
        m.update(blob_reader.read(blob_info.size))
        replaymd5 = m.hexdigest()

        replay = Replay(replayid=replayid,
                        replaymd5=replaymd5,
                        blobinfo=str(key),
                        ip=self.request.remote_addr)
        replay.put()

        self.redirect('/success/%s' % replayid)
Example #5
 def wrapper(*args, **kwargs):
     map_id = args[0]
     user_id = args[1]
     lzma = Cacher.check_cache(map_id, user_id)
     if lzma:
         replay_data = osrparse.parse_replay(lzma, pure_lzma=True).play_data
         return Replay(replay_data, user_id)
     else:
         return function(*args, **kwargs)
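The wrapper above closes over a free variable named function, so it is presumably the inner function of a caching decorator. A minimal sketch of the enclosing shape, under that assumption and reusing Cacher, osrparse, and Replay from the snippet:

def cached_replay(function):
    # Serve the Replay from the on-disk lzma cache when present;
    # otherwise fall through to the wrapped loader.
    def wrapper(*args, **kwargs):
        map_id, user_id = args[0], args[1]
        lzma = Cacher.check_cache(map_id, user_id)
        if lzma:
            replay_data = osrparse.parse_replay(lzma, pure_lzma=True).play_data
            return Replay(replay_data, user_id)
        return function(*args, **kwargs)
    return wrapper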
Example #6
 def startReplay(self):
     self.unloadMap()
     self.replay = Replay()
     self.world = self.replay.loadWorld()
     self.world.application = self
     self.battle_controller = BattleController(self, self.world)
     self.ai = AI(self, self.world)
     self.createVisual()
     self.replay.loadCommands()
Example #7
 def __init__(self, config, scene):
     self.vrep_path = config.vrep_path
     self.viz = config.visualization
     self.autolaunch = config.autolaunch
     self.port = config.api_port
     self.clientID = None
     self.scene = scene
     self.dt = config.dt
     self.replay = Replay(config.max_buffer, config.batch_size)
     self.batch_size = config.batch_size
Example #8
 def wrapper(*args, **kwargs):
     cacher = args[0]
     map_id = args[1]
     user_id = args[2]
     enabled_mods = args[4]
     lzma = cacher.check_cache(map_id, user_id)
     if lzma:
         replay_data = osrparse.parse_replay(lzma, pure_lzma=True).play_data
         return Replay(replay_data, user_id, enabled_mods)
     else:
         return function(*args, **kwargs)
Example #9
def main(_):
    pp.pprint(flags.FLAGS.__flags)
    with tf.Session() as sess:
        data_loader = Data_loader(FLAGS.embedding_file, FLAGS.embedding_size)
        q_network = Q_network(sess, FLAGS.embedding_size, FLAGS.step_size,
                              FLAGS.target_frequency, FLAGS.hidden_units,
                              FLAGS.final_units, FLAGS.greedy_ratio,
                              data_loader)
        replay = Replay(q_network, FLAGS.minibatch_size, FLAGS.replay_size)
        model = DQL(FLAGS.budget, data_loader, q_network, replay)
        model.run()
Example #10
 def replay(self, args):
     Log.log_switch = False
     Replay.switch = True
     if args.lite:
         Replay.mode = 'LITE'
         print('* MODE : LITE *')
     else:
         print('* MODE : REPLAY *')
     if args.transactionid and args.date:
         print('Please specify only one type of data for replay')
         return
     elif args.transactionid:
         Replay().replay_execute(self.parser,
                                 transaction_id=args.transactionid)
     elif args.date:
         Replay().replay_execute(self.parser,
                                 start_time=args.date[0],
                                 end_time=args.date[1])
     else:
         Replay().replay_execute(self.parser)
Example #11
    def __init__(self, bin_file_path):
        """
        Args:
            bin_file_path (string): Path to a directory containing preprocessed .bin replay files.
        """
        self.game_states = []
        self.root_dir = bin_file_path

        for root, dirs, files in os.walk(bin_file_path):
            for name in files:
                if name.split('.')[-1] != "bin":
                    continue

                with open(os.path.join(root, name), 'rb') as f:  # use the walk root so files in subdirectories resolve
                    file_content = f.read()
                    _, states = Replay(file_content)

                    for state in states:
                        if state.players is None:
                            continue

                        if len(state.players) != 6:
                            continue

                        if state.state != State.Game:
                            continue

                        # add default state, team red
                        self.add_states(state, Team.Red)

                        # add state flipped about x axis, team red
                        self.add_states(
                            du.flip_state(state,
                                          x_axis_flip=True,
                                          y_axis_flip=False), Team.Red)

                        # add state flipped about y axis, team blue
                        self.add_states(
                            du.flip_state(state,
                                          x_axis_flip=False,
                                          y_axis_flip=True), Team.Blue)

                        # add state flipped about x and y axis, team blue
                        self.add_states(
                            du.flip_state(state,
                                          x_axis_flip=True,
                                          y_axis_flip=True), Team.Blue)

        self.game_states = du.filter_states_3v3(game_states=self.game_states)
Example #12
    def __depth_first_search(self, start_path, inter_path):
        current_path = join(start_path, inter_path)
        dirs_and_files = listdir(current_path)
        dirs = []
        files = []

        for df in dirs_and_files:
            # print(current_path)

            if is_replay(join(current_path, df)):
                files.append(df)

            if isdir(join(current_path, df)):
                if 'Replays' == df:
                    raise Exception('Replays folder is already formed')
                else:
                    dirs.append(df)

        for d in dirs:
            inter = join(inter_path, d)
            # print('recurse', inter)
            self.__depth_first_search(start_path, inter)

        key = ''
        # Finished the recursive steps; now read the discovered replays.
        for filename in files:
            src_file = join(start_path, inter_path, filename)
            original = Replay(src_file)
            keys = self.__inspector.inspect(original)

            # go through each key
            for key in keys:
                replay = copy_replay(original)

                # place replays in the proper folders
                if key in self.__folders:
                    self.__folders[key].append(replay)

                else:
                    self.__folders[key] = []

                    # series_flag -1 means no replay with the same player names has been seen yet
                    replay.series_flag = -1
                    self.__folders[key].append(replay)
Example #13
    def __init__(self, bin_file_path):
        """
        Args:
            bin_file_path (string): Path to a directory containing preprocessed .bin replay files.
        """
        self.game_states = []
        self.root_dir = bin_file_path

        for root, dirs, files in os.walk(bin_file_path):
            for name in files:
                if name.split('.')[-1] == "bin":
                    with open(os.path.join(root, name), 'rb') as f:  # use the walk root so files in subdirectories resolve
                        file_content = f.read()
                        _, states = Replay(file_content)

                        for state in states:
                            if state.players is not None and len(state.players) == 2:
                                # flip states so that opposing demonstrations are learned
                                #   and there are more states to learn from

                                # add default state, team 0
                                self.game_states.append((state, 0))

                                # add state flipped about x axis, team 0
                                self.game_states.append(
                                    (du.flip_state(state,
                                                   flip_x=True,
                                                   flip_y=False), 0))

                                # add state flipped about y axis, team 1
                                self.game_states.append(
                                    (du.flip_state(state,
                                                   flip_x=False,
                                                   flip_y=True), 1))

                                # add state flipped about x and y axis, team 1
                                self.game_states.append(
                                    (du.flip_state(state,
                                                   flip_x=True,
                                                   flip_y=True), 1))

        self.game_states = du.filter_states(game_states=self.game_states)
Example #14
def main():
    xminmax = [0, 0]
    yminmax = [0, 0]
    bin_file_path = 'preprocessed'

    for root, dirs, files in os.walk(bin_file_path):
        for name in files:
            if name.split('.')[-1] == "bin":
                print(name)
                with open(os.path.join(bin_file_path, name), 'rb') as f:
                    file_content = f.read()
                    _, states = Replay(file_content)

                    for state in states:
                        if state.players is not None and len(state.players) == 2:
                            xmin = min(state.players[0].disc.x,
                                       state.players[1].disc.x)
                            xmax = max(state.players[0].disc.x,
                                       state.players[1].disc.x)

                            ymin = min(state.players[0].disc.y,
                                       state.players[1].disc.y)
                            ymax = max(state.players[0].disc.y,
                                       state.players[1].disc.y)

                            xminmax[0] = min(xmin, xminmax[0])
                            xminmax[1] = max(xmax, xminmax[1])
                            yminmax[0] = min(ymin, yminmax[0])
                            yminmax[1] = max(ymax, yminmax[1])
                    print('x min max:', xminmax)
                    print('y min max:', yminmax)
                print('---------------------')
Example #15
    def __init__(self, bin_file_path):
        """
        Args:
            bin_file_path (string): Path to a directory containing preprocessed .bin replay files.
        """
        self.game_states = []
        self.root_dir = bin_file_path

        for root, dirs, files in os.walk(bin_file_path):
            for name in files:
                if name.split('.')[-1] == "bin":
                    with open(os.path.join(root, name), 'rb') as f:  # use the walk root so files in subdirectories resolve
                        file_content = f.read()
                        _, states = Replay(file_content)

                        for state in states:
                            if state.players is not None and len(state.players) == 2:
                                self.game_states.append((state, 0))
                                self.game_states.append((state, 1))
Example #16
 def __init__(self, parameters):
     # Gym environment parameters
     self.env_name = parameters.environment_name
     self.env = gym.make(self.env_name)
     self.state_dim = self.env.observation_space.shape[0]
     self.action_dim = self.env.action_space.n
     # Training parameters
     self.discount = Training_parameters.discount
     self.train_episodes = parameters.train_episodes
     self.test_episodes = Training_parameters.test_episodes
     self.test_frequency = Training_parameters.test_frequency
     self.render_decision = parameters.render_decision
     self.render_frequency = Training_parameters.render_frequency
     # Replay memory parameters
     self.memory = Replay()
     self.memory.burn_memory(self.env)
     # Q-networks parameters
     self.Q_net = Network(self.state_dim, self.action_dim, Network_parameters.Q_net_var_scope, parameters.duel)
     self.target_Q_net = Network(self.state_dim, self.action_dim, Network_parameters.target_Q_net_var_scope, parameters.duel)
     self.update_target_frequency = Training_parameters.update_target_frequency
     self.double = parameters.double
Example #17
    def __init__(
        self,
        batch_size=64,
        device='cpu',
        gamma=0.95,
        gradient_clip=0.0,
        loss_fn='L2',
    ):
        self.env = gym.make('CartPole-v0')
        self.input_size = self.env.observation_space.shape[0]
        self.num_actions = self.env.action_space.n

        self.device = device
        self.qnet = CartPolePolicy(self.input_size, self.num_actions, device)
        self.target_qnet = CartPolePolicy(self.input_size, self.num_actions,
                                          device)
        self.target_qnet.copy_params_(self.qnet)

        self.eps_sch = LinearEpsilonScheduler()

        self.optimizer = optim.Adam(self.qnet.parameters(), lr=1e-4)

        if gradient_clip > 0.0:
            for p in self.qnet.parameters():
                p.register_hook(lambda grad: torch.clamp(
                    grad, min=-gradient_clip, max=gradient_clip))

        self.schema = DataSchema(
            names=["prev_state", "action", "reward", "state", "done"],
            shapes=[(self.input_size, ), (1, ), (1, ), (self.input_size, ),
                    (1, )],
            dtypes=[np.float32, np.int64, np.float32, np.float32,
                    np.float32],  # float observations; integer action index
        )

        self.replay = Replay(100000, self.schema)

        self.batch_size = batch_size
        self.gamma = gamma
        self.loss_fn = loss_fn
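For reference, batch_size, gamma, and the target network above typically combine into the one-step TD target; the training step is not shown in this snippet, so the sampling API below is hypothetical:

        # prev_state, action, reward, state, done = self.replay.sample(self.batch_size)  # hypothetical API
        # with torch.no_grad():
        #     target = reward + self.gamma * (1.0 - done) * self.target_qnet(state).max(dim=1).values
        # loss = F.mse_loss(self.qnet(prev_state).gather(1, action), target.unsqueeze(1))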
Example #18
 def play_one_game(self):
     replay = Replay()
     s = self.env.reset()
     count = 0
     while True:
         conv_s = np.reshape(s, [1, 84, 84, 4])
         p_g = self.nns["good"].predict(conv_s)
         p_n = self.nns["normal"].predict(conv_s)
         p_b = self.nns["bad"].predict(conv_s)
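         # Blend the three heads into one policy: weight the "good" network up,
         # add the "normal" one, subtract the "bad" one, then shift by +1 and
         # renormalize so the entries stay nonnegative and sum to 1.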
         p = 2 * p_g["pi"][0] + p_n["pi"][0] - p_b["pi"][0]
         p += np.ones_like(self.a)
         p /= np.sum(p)
         a = np.random.choice(self.a, p=p)
         s_, r, t, _ = self.env.step(a)
         replay.add(s, a)
         replay.score += r
         s = s_
         count += 1
         if count % 10 == 0:
             print(".", end="", flush=True)
         if t:
             print()
             break
     return replay
Example #19
def handle_replay(node, seed, command, transfers, **kwargs):
    # Check if a valid command
    arguments = command.split(' ', 1)
    t_id = None

    try:
        t_id = arguments[1]
    except IndexError:
        return pretty_print('Invalid command - See example usage.')

    bundle = None
    t_id = t_id.strip()

    if not transfers:
        return pretty_print('Looks like you do not have any account history.')
    
    for transfer in transfers:
        id_as_string = str(transfer['short_transaction_id'])

        if id_as_string == t_id:
            bundle = transfer['bundle']
            break

    if bundle is None:
        return pretty_print(
            'Looks like there is no bundle associated with your specified short transaction id. Please try again.'
        )

    pretty_print('Starting to replay your specified bundle. This might take a few seconds...', color='green')
    return Replay(
        node,
        seed,
        bundle,
        replay_callback=lambda message: pretty_print(message, color='blue'),
        **kwargs
    )
Example #20
    def __init__(self, basepath):
        self.recinfo = Recinfo(basepath)

        self.position = ExtractPosition(self.recinfo)
        self.epochs = behavior_epochs(self.recinfo)
        self.artifact = findartifact(self.recinfo)
        self.makePrmPrb = makePrmPrb(self.recinfo)
        self.utils = SessionUtil(self.recinfo)

        self.spikes = spikes(self.recinfo)
        self.brainstates = SleepScore(self.recinfo)
        self.swa = Hswa(self.recinfo)
        self.theta = Theta(self.recinfo)
        self.spindle = Spindle(self.recinfo)
        self.gamma = Gamma(self.recinfo)
        self.ripple = Ripple(self.recinfo)
        self.placefield = pf(self.recinfo)
        self.replay = Replay(self.recinfo)
        self.decode = DecodeBehav(self.recinfo)
        self.localsleep = LocalSleep(self.recinfo)
        self.viewdata = SessView(self.recinfo)
        self.pbe = PBE(self.recinfo)

        self.eventpsth = event_event()
Example #21
                print(f"{error_text}\"screen_height\"")
                exit(-1)
        elif "screen_width" in line:
            try:
                configurations["screen_width"] = int(line.split("=")[1])
            except (ValueError, IndexError):
                print(f"{error_text}\"screen_width\"")
                exit(-1)
        elif "input_delay" in line:
            try:
                configurations["input_delay"] = float(line.split("=")[1])
            except (ValueError, IndexError):
                print(f"{error_text}\"input_delay\"")
                exit(-1)

    if len(configurations) < 4:
        print("config.txt is incomplete.")
        exit(-1)
    return configurations


if __name__ == '__main__':
    record, replay, replay_filename, seed = interpret_parameters()
    if not replay:
        gtg = GateToGods(seed, replay_filename)
        gtg.play()
    else:
        colours = Colours()
        r = Replay(replay_filename, colours)
        r.play_replay(Input(), colours)
Example #22
import time
import getopt
import sys
sys.path.append('..')

import numpy as np

if __name__ == '__main__':
    node = sys.argv[1]

    if node == '-r':
        from replay import Replay
        Replay(sys.argv[2:])
    elif node == '-l':
        from hitted_main import LearnerHitted, AgentHitted

        LearnerHitted(sys.argv[2:], AgentHitted)
    elif node == '-rl':
        from hitted_main import ReplayLearnerHitted, AgentHitted

        ReplayLearnerHitted(sys.argv[2:], AgentHitted)
    elif node == '-a':
        from hitted_main import ActorHitted, AgentHitted

        ActorHitted(sys.argv[2:], AgentHitted)
    else:
        print('the first arg must be one of -r, -l, -rl, or -a')
Example #23
import torch
from nn import MLP, hyperparams
from game import tictactoe
from replay import Replay
import math

# Tictactoe board: [x0,...,x8], a vector of length 9
# 0 = empty
# 1 = self
# 2 = enemy
# Action: [0,...,1,...,0] vector of length=9
# all elements are binary
# at most one 1
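# For example (illustrative only):
#   board  [0, 0, 1, 0, 2, 0, 0, 0, 0] -> our mark in cell 2, enemy in cell 4
#   action [0, 0, 0, 0, 0, 0, 1, 0, 0] -> place a mark in cell 6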

# Experience Replay Initialization
replay_num = 20
m = Replay(replay_num)

# Initialize Q and target neural network
hyp = hyperparams(0.5, 2, 50)

torch.manual_seed(hyp.seed)


def accuracy(predictions, label):
    total_corr = 0
    index = 0
    for c in predictions.flatten():
        if c.item() > 0.5:
            r = 1.0
        else:
            r = 0.0
Example #24
	def startRecording(self):
		self.recording = True
		self.replay = Replay()
		self.replay.saveWorld(self.world)
Example #25
import pygame
from character import Character
# About Character class:
#   Design with 4*4 grid layout.
#   Rightmost column is for spacing.
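#   (Illustration, an assumption on my part: a glyph is drawn in the left
#    three columns of its 4x4 cell, and the fourth column stays blank so
#    neighbouring characters do not touch.)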
from replay import Replay
from crosstest import cross_test

# Initialize modules
SCR_RECT = pygame.Rect(0, 0, 640, 480)
pygame.init()
screen = pygame.display.set_mode(SCR_RECT.size)
pygame.display.set_caption("KOBATO")
surface = pygame.Surface(SCR_RECT.size)
character = Character(screen)
replay = Replay()
# Constant
GRID = 50
CELL = 0
if SCR_RECT[3] <= SCR_RECT[2]:
    CELL = SCR_RECT[3] // 54
else:
    CELL = SCR_RECT[2] // 54
MARGIN_X = (SCR_RECT[2] - GRID * CELL) // 2
MARGIN_Y = (SCR_RECT[3] - GRID * CELL) // 2
GRID_FOR_CHAR = CELL // 2
LIGHT_GRAY = (110, 110, 110)
DARK_GRAY = (50, 50, 50)
GRID_COLOR = (30, 30, 30)
BG_COLOR = (0, 0, 0)
PLAYER_COLOR = (255, 0, 100)
Example #26
	def getCommand(self, pipe):
		#world_buffer = serializer.dump(self.world)
		best_commands = [BattleCommand("endTurn")]
		best_score = self.scoreEndTurn(self.world.current_character_turn)
		self.actions_considered = 1

		# action without movement with current AP
		for action in self.world.current_character_turn.combat_actions:
			if action.AP_cost <= self.world.current_character_turn.cur_AP:
				valid_targets = self.world.getValidTargets(self.world.current_character_turn, action.targeting_rules)
				for target in valid_targets:
					score = self.scoreAction(action, target, self.world) / self.scoreAP(action.AP_cost, self.world.current_character_turn)
					if score > best_score:
						best_commands = [BattleCommand("executeAction", target.ID, action)]
						best_score = score

		#if best_commands[0].command == "executeAction":
		#	print "Best action score:", best_score

		# movement + action with current AP
		possible_moves = self.world.possibleMoves()
		counter = 0
		for move in possible_moves:
			score, action = self.scoreMove(move, 0, self.world)
			if score > best_score:
				best_commands = [BattleCommand("run", move.ID), action]
				best_score = score
			counter += 1
			pipe.send(float(counter) / len(possible_moves))

		# if no move was found yet we'll try multiturn moves
		if best_commands[0].command == "endTurn":
			# action with increased AP
			for action in self.world.current_character_turn.combat_actions:
				valid_targets = self.world.getValidTargets(self.world.current_character_turn, action.targeting_rules)
				for target in valid_targets:
					score = self.scoreAction(action, target, self.world) / self.scoreAP(action.AP_cost, self.world.current_character_turn)
					if score > best_score:
						best_commands = [BattleCommand("executeAction", target.ID, action), BattleCommand("endTurn")]
						best_score = score

			# movement + action with increased AP
			turns = 1
			while True:
				possible_moves = self.world.possibleMoves(turns)
				#print "turns:", turns, "; len(possible_moves):", len(possible_moves)
				if len(possible_moves) == 0:
					break
				counter = 0
				for move in possible_moves:
					score, action = self.scoreMove(move, turns, self.world)
					if score > best_score:
						best_commands = [BattleCommand("run", move.ID), action, BattleCommand("endTurn")]
						best_score = score
					counter += 1
					#if (counter % 4) == 0:
					pipe.send(float(counter) / len(possible_moves))
				if best_commands[0].command != "endTurn":
					break
				turns += 1

		#if best_commands[0].command == "run":
		#	print "Best move score:", best_score

		#print "Commands considered:", self.actions_considered
		#return best_command
		replay = Replay()
		replay.commands = best_commands
		pipe.send(replay)
Example #27
    def __init__(self, replay_id: ReplayId, tickets: Tuple[Ticket, Ticket]):
        time = tickets[0].wishes.wished_time

        self.replay = Replay(replay_id, tickets)
        self.timeLeft = (time, time)
        self.gameState = GameState()
Example #28
	- number of cores on board
	- number of units spawned in a single turn (max)
	- dramatic drop in score
'''

n = 2000
scores = {}

print('Checking replays:')
start = time.perf_counter()  # time.clock() no longer exists on Python 3.8+
for i, ID in enumerate(range(total_matches, total_matches - n, -1)):
    try:
        print('\t{}.......{}\t\t\t\r'.format(i, ID), end='')
        raw_str = get_match_raw_str(ID, prefix=False)
        str_data = svr.get_page_content(raw_str)
        scores[ID] = Replay(ID, str_data).get_score()
    except json.decoder.JSONDecodeError as e:
        print('Failed with id: {}...skipping'.format(ID))

print()
print('Time elapsed: {}'.format(time.perf_counter() - start))
print()

# print ('Scores:')
# for k,v in scores.items():
# 	print ('\t{: <7} : {}'.format(str(k),str(v)))

print()
match_str = svr.get_match_str(max(scores.keys(), key=lambda k: scores[k]))
print('Match link: {}'.format(match_str))
print()
Example #29
from scaner import FindWindow
from locator import Locator
from util.scissors import Scissors
from replay import Replay
import cv2 as cv
import numpy as np
import time
# import matplotlib.pylab as plt
# if __name__ == "__main__":
# wdname is the name of the Lianliankan window; it must be the complete title
wdname = u'OMS外包管理系统 - Google Chrome'
demo = FindWindow(wdname)
loct = Locator()
rep = Replay()
scor = Scissors()
# time.sleep(1)
if demo.hwnd:
    temps = [
        'step-1.jpg', 'step-2.jpg', 'step-3.jpg', 'step-4.jpg', 'step-5.jpg',
        'step-6.jpg', 'step-7.jpg', 'step-8.jpg', 'step-9.jpg', 'step-10.jpg',
        'step-11.jpg'
    ]
    loop = 0
    while loop < len(temps):
        window = scor.cutout(demo.position())
        window = cv.cvtColor(np.array(window), cv.COLOR_RGB2BGR)  # convert PIL image to OpenCV BGR
        cur_temp = 'assets/%s' % temps[loop]
        max_loc, min_loc = loct.getCoord(window, cur_temp)  # avoid shadowing the max()/min() builtins
        template = cv.imread(cur_temp, 0)
        w, h = template.shape[::-1]
        cv.rectangle(window, min_loc, (min_loc[0] + w, min_loc[1] + h), (0, 0, 255), 2)
Example #30
def save_data(replay_paths):
    # description = "%s" % (".".join(replay_paths))

    # hash = hashlib.md5(description.encode('utf-8')).hexdigest()
    # pickle_path = os.path.join('data', "%s.pickle")
    # if os.path.exists(pickle_path):
    #     print("%s already saved" % pickle_path)
    #     return

    expand_dataset = Dataset(sections=[], labels=[])
    conquer_dataset = Dataset(sections=[], labels=[])
    buckets = Buckets(expand=expand_dataset, conquer=conquer_dataset)

    for replay_path in replay_paths:
        print("Processing replay %s with kernels %d and %d" %
              (replay_path, expand_kernel_size, conquer_kernel_size))
        # pickle_path = os.path.join('data/', '%s.%d.%s' % (filename, kernel_size, 'pickle'))

        print("Loading replay %s" % replay_path)
        replay = Replay(replay_path)
        replay.load()
        print(replay)
        print("Combining data ... ")
        replay.combine_data()

        first_stage_limit = replay.find_sections_count_before_first_collision()

        # Expand Data prep
        print("Padding with %d ... " % expand_kernel_size)
        replay.prepare_padded_arrays(expand_kernel_size)
        print("Generating sections for cells with surrounding")
        sections, labels = replay.get_sections_and_labels(own=False)

        print("Collect expand phase. First %d moves." % first_stage_limit)
        expand_sections = sections[:first_stage_limit]
        expand_labels = labels[:first_stage_limit]
        print("Rotate each section")
        expand_sections, expand_labels = rotate_all_sections(
            expand_sections, expand_labels)
        buckets.expand.sections.append(expand_sections)
        buckets.expand.labels.append(expand_labels)

        # Conquer Data prep
        # print("Padding with %d ... " % conquer_kernel_size)
        # replay.prepare_padded_arrays(conquer_kernel_size)
        # print("Generating sections for OWN cells with surrounding")
        # sections, labels = replay.get_sections_and_labels(own=True)
        #
        # print("Collect conquer phase. Last %d moves." % (len(sections) - first_stage_limit))
        # conquer_sections, conquer_labels = sections[first_stage_limit:], labels[first_stage_limit:]
        # print("Rotate each section")
        # conquer_sections, conquer_labels = rotate_all_sections(conquer_sections, conquer_labels)
        # buckets.conquer.sections.append(conquer_sections)
        # buckets.conquer.labels.append(conquer_labels)

    # Expand
    expand_dataset = Dataset(
        sections=np.concatenate(buckets.expand.sections, axis=0),
        labels=np.concatenate(buckets.expand.labels, axis=0))

    train_data, test_data, train_labels, test_labels = train_test_split(
        expand_dataset.sections, expand_dataset.labels, train_size=.8)

    print("Equalizing test data and labels")
    # We want testing data to have equal amount of different classes
    # Otherwise accuracy can be spoiled
    test_data, test_labels = equalized_sections(test_data, test_labels)

    print("%d of expand training data, %d of expand testing data" %
          (len(train_data), len(test_data)))
    expand_data = {
        'train_data': train_data,
        'train_labels': train_labels,
        'test_data': test_data,
        'test_labels': test_labels,
        'kernel_size': expand_kernel_size
    }

    # Conquer
    # conquer_dataset = Dataset(
    #     sections=np.concatenate(buckets.conquer.sections, axis=0),
    #     labels=np.concatenate(buckets.conquer.labels, axis=0)
    # )
    #
    # train_data, test_data, train_labels, test_labels = train_test_split(
    #     conquer_dataset.sections, conquer_dataset.labels, train_size=.8)
    # print("Equalizing train data and labels")
    # train_data, train_labels = equalized_sections(train_data, train_labels)
    # print("Equalizing test data and labels")
    # test_data, test_labels = equalized_sections(test_data, test_labels)
    # print("%d of conquer training data, %d of conquer testing data" % (len(train_data), len(test_data)))
    # conquer_data = {
    #     'train_data': train_data,
    #     'train_labels': train_labels,
    #     'test_data': test_data,
    #     'test_labels': test_labels,
    #     'kernel_size': conquer_kernel_size
    # }
    conquer_data = None

    data = {'expand_data': expand_data, 'conquer_data': conquer_data}

    pickle_path = 'data/data.pickle'
    with open(pickle_path, 'wb') as f:
        print("Saving to %s" % pickle_path)
        pickle.dump(data, f)
    return data