Exemple #1
0
    def __init__(self, pg_screen, screen_size):
        """Login screen: network client, font, login state and the
        username/password input boxes.

        pg_screen   -- pygame display surface to draw on
        screen_size -- (width, height) of the window
        """
        Screen.__init__(self, pg_screen, screen_size)
        self.pg_screen = pg_screen
        self.screen_size = screen_size
        self.network = Network()
        self.font = Font(None, 30)
        # login state: logging_in presumably flips while a request is in
        # flight and logging_in_status carries its outcome — verify
        # against the login handler elsewhere in this class
        self.logging_in = False
        self.logging_in_status = None
        self.username = None

        # load the assets
        self.buttons = load("assets/buttons.png").convert_alpha()

        # create the input boxes
        self.username_box = InputBox((10, 150), (300, 40))
        self.password_box = InputBox((10, 230), (300, 40), type="password")
Exemple #2
0
    def __init__(self, pg_screen, screen_size):
        """Highscore screen (judging by the state below): pagination
        state and menu assets.

        pg_screen   -- pygame display surface to draw on
        screen_size -- (width, height) of the window
        """
        Screen.__init__(self, pg_screen, screen_size)
        self.pg_screen = pg_screen
        self.screen_size = screen_size
        self.network = Network()
        self.font = Font(None, 30)

        # fetch_* flags request a (re)load of each list; *_page track
        # pagination; highscore_page_idx presumably selects the visible
        # tab — confirm against the draw/update code
        self.fetch_global_highscores = True
        self.fetch_personal_highscores = True
        self.global_highscores_page = 0
        self.personal_highscores_page = 0
        self.highscore_page_idx = 0

        # load the assets
        self.mainmenu = load("assets/mainmenu.png").convert_alpha()
        self.numbers = load("assets/numbers.png").convert_alpha()
        self.buttons = load("assets/buttons.png").convert_alpha()
Exemple #3
0
 def set_network(self, ph, args, scope):
     """Build and return a Network reading from placeholder `ph` under
     variable scope `scope`.

     Architecture hyperparameters (fsize, conv_depth, n_layers,
     kernel_init) come from the `args` dict; the output width is
     self.out_size and the stride count is fixed at 2.
     """
     return Network(
         ph,
         self.out_size,
         scope,
         args['fsize'],
         args['conv_depth'],
         args['n_layers'],
         n_strides=2,
         kernel_init=args['kernel_init'],
     )
 def dynamics_func(self, state, action, reuse):
     """One-step dynamics model: predict the next encoded state as
     state + f([state, action]).

     `reuse` is forwarded so the 'dynamics' variable scope can be
     shared across multiple calls.
     """
     # add state, action normalization?
     state_action = tf.concat([state, action], axis=1)
     predicted_delta = Network(
         state_action,
         self.enc_dim,
         'dynamics',
         self.hid_size,
         conv_depth=0,
         n_hidden_dense=self.n_hidden,
         reuse=reuse,
     )
     # residual formulation: the network learns the change, not the state
     return state + predicted_delta
Exemple #5
0
 def make_encoder(self, state, z_size, scope):
     """Encode `state` into a z_size-dimensional Gaussian q(z | s).

     The mean is produced by a conv network; the diagonal log-std is a
     single learned variable shared across all inputs.
     """
     mean = Network(state, z_size, scope, self.hid_size,
                    conv_depth=self.n_hidden)
     log_std = tf.get_variable("logstd", shape=(z_size, ))
     scale = tf.exp(log_std)
     return tfp.distributions.MultivariateNormalDiag(loc=mean,
                                                     scale_diag=scale)
    def __init__(self, pg_screen, screen_size):
        """In-game (snake) screen: key bindings, game state and sprites.

        pg_screen   -- pygame display surface to draw on
        screen_size -- (width, height) of the window
        """
        Screen.__init__(self, pg_screen, screen_size)
        self.pg_screen = pg_screen
        self.screen_size = screen_size
        self.network = Network()
        self.font = Font(None, 30)
        # map both arrow keys and WASD onto direction indices (see
        # self.direction below)
        self.arrow_keys = {
            pygame.K_UP: 0,
            pygame.K_w: 0,
            pygame.K_LEFT: 1,
            pygame.K_a: 1,
            pygame.K_DOWN: 2,
            pygame.K_s: 2,
            pygame.K_RIGHT: 3,
            pygame.K_d: 3,
        }

        # ingame parameters
        self.init = True
        self.pause = False
        self.score = 0
        self.game_over = False
        self.game_time = 0
        # game_tick presumably is seconds per snake step and shrinks by
        # game_tick_decrement as the game speeds up — confirm in the
        # update loop
        self.game_tick = 0.5
        self.game_tick_decrement = 0.05
        self.direction = 0  # [0,1,2,3] == [up,left,down,right]
        self.snake = [(5, 5), (5, 6), (5, 7)]  # 10 * 13 squares
        self.stain_pos = self._generate_stain_pos()

        # load the assets
        self.ready = load("assets/ready.png").convert_alpha()
        self.gameover = load("assets/gameover.png").convert_alpha()
        self.buttons = load("assets/buttons.png").convert_alpha()
        self.pausemenu = load("assets/pausemenu.png").convert_alpha()
        self.numbers = load("assets/numbers.png").convert_alpha()
        self.tail = load("assets/tail.png").convert_alpha()
        # load stains
        self.stains = [
            load("assets/stain1.png").convert_alpha(),
            load("assets/stain2.png").convert_alpha(),
            load("assets/stain3.png").convert_alpha(),
        ]
        # load heads, indexed by direction: up, left, down, right
        self.heads = [
            load("assets/headup.png").convert_alpha(),
            load("assets/headleft.png").convert_alpha(),
            load("assets/headdown.png").convert_alpha(),
            load("assets/headright.png").convert_alpha(),
        ]
        self.stain = random.choice(self.stains)
Exemple #7
0
    def __init__(self, pg_screen, screen_size):
        """Main-menu screen: logo/menu/register-login images and
        sound/music toggles.

        pg_screen   -- pygame display surface to draw on
        screen_size -- (width, height) of the window
        """
        Screen.__init__(self, pg_screen, screen_size)
        self.pg_screen = pg_screen
        self.screen_size = screen_size
        self.network = Network()
        self.font = Font(None, 30)
        # audio toggles, both enabled by default
        self.sound_on = True
        self.music_on = True

        # load the assets
        self.logo = load("assets/logo.png").convert_alpha()
        self.main_menu = load("assets/mainmenu.png").convert_alpha()
        self.register_login = load("assets/register_login.png").convert_alpha()
        self.buttons = load("assets/buttons.png").convert_alpha()
Exemple #8
0
 def make_discriminator(self, z, output_size, scope, n_layers, hid_size,
                        reuse=False):
     """Discriminator over z = [z1, z2]: p(y | z).

     Runs a dense network (no conv layers) under `scope` and wraps its
     logits in a Bernoulli distribution.
     """
     dense_logits = Network(z, output_size, scope, hid_size,
                            conv_depth=0, n_hidden_dense=n_layers,
                            reuse=reuse)
     return tfp.distributions.Bernoulli(dense_logits)
Exemple #9
0
class Trainer:
    """Distributional (C51) DQN trainer for Super Hexagon.

    Owns the replay memory, online/target networks, optimizer and
    learning-rate schedule, plus per-level statistics.  The whole object
    is pickled by save() so a run can be resumed with load().
    """

    def __init__(
            self,
            capacity_per_level=500000,
            warmup_steps=100000,
            n_frames=4,
            n_atoms=51,
            v_min=-1,
            v_max=0,
            gamma=.99,
            device='cuda',
            batch_size=48,
            lr=0.0000625 * 2,
            lr_decay=0.99,
            update_target_net_every=25000,
            train_every=6,
            frame_skip=4,
            disable_noisy_after=2000000,
            super_hexagon_path='C:\\Program Files (x86)\\Steam\\steamapps\\common\\Super Hexagon\\superhexagon.exe',
            run_afap=True):
        """Create all training state.

        v_min/v_max bound the value-distribution support; n_atoms is the
        number of atoms on it; disable_noisy_after switches the networks
        to eval mode after that many iterations (see train()).
        """

        # training objects
        self.memory_buffer = MemoryBuffer(
            capacity_per_level,
            SuperHexagonInterface.n_levels,
            n_frames,
            SuperHexagonInterface.frame_size,
            SuperHexagonInterface.frame_size_cropped,
            gamma,
            device=device)
        self.net = Network(n_frames, SuperHexagonInterface.n_actions,
                           n_atoms).to(device)
        self.target_net = Network(n_frames, SuperHexagonInterface.n_actions,
                                  n_atoms).to(device)
        self.target_net.load_state_dict(self.net.state_dict())
        self.optimizer = torch.optim.Adam(self.net.parameters(),
                                          lr=lr,
                                          eps=1.5e-4)
        self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer, ExpLrDecay(lr_decay, min_factor=.1))

        # parameters
        self.batch_size = batch_size
        self.update_target_net_every = update_target_net_every
        self.train_every = train_every
        self.frame_skip = frame_skip
        self.disable_noisy_after = disable_noisy_after
        self.warmup_steps = warmup_steps
        self.gamma = gamma
        self.device = device

        # parameters for the distributional (C51) head
        self.n_atoms = n_atoms
        self.v_min = v_min
        self.v_max = v_max
        self.delta_z = (v_max - v_min) / (n_atoms - 1)  # atom spacing
        self.support = torch.linspace(v_min,
                                      v_max,
                                      n_atoms,
                                      dtype=torch.float,
                                      device=device)
        # flat-index row offsets used by train_batch to scatter into self.m
        self.offset = torch.arange(0,
                                   batch_size * n_atoms,
                                   n_atoms,
                                   device=device).view(-1, 1)
        # reusable buffer for the projected target distribution
        self.m = torch.empty((batch_size, n_atoms), device=device)

        # debug and logging stuff
        self.list_steps_alive = [[]
                                 for _ in range(SuperHexagonInterface.n_levels)
                                 ]
        self.longest_run = [(0, 0)] * SuperHexagonInterface.n_levels
        self.total_simulated_steps = [0] * SuperHexagonInterface.n_levels
        self.losses = []
        self.kls = []
        self.times = []
        self.iteration = 0

        self.super_hexagon_path = super_hexagon_path
        self.run_afap = run_afap

    def warmup(self, game, log_every):
        """Fill the replay buffer with `warmup_steps` random-action
        transitions before learning starts.

        Returns the terminal flag of the last transition so the caller
        can continue seamlessly.
        """
        t = True
        for i in range(1, self.warmup_steps + 1):
            if i % log_every == 0:
                print('Warmup', i)
            if t:
                # episode ended: rotate to the next level once this level
                # has been simulated at least as long as the previous one
                self.total_simulated_steps[game.level] += game.simulated_steps
                if self.total_simulated_steps[
                        game.level] > self.total_simulated_steps[game.level -
                                                                 1]:
                    game.select_level((game.level + 1) % 6)
                f, fc = game.reset()
                self.memory_buffer.insert_first(game.level, f, fc)
            a = np.random.randint(0, 3)  # uniform random action
            (f, fc), r, t = game.step(a)
            self.memory_buffer.insert(game.level, a, r, t, f, fc)
        return t

    def train(
        self,
        save_every=50000,
        save_name='trainer',
        log_every=1000,
    ):
        """Main training loop; runs until interrupted.

        Alternates acting in the game with a minibatch update every
        `train_every` iterations, refreshes the target net on a schedule,
        and pickles the trainer shortly after each `save_every` mark
        (deferred until the agent dies).
        """

        game = SuperHexagonInterface(self.frame_skip,
                                     self.super_hexagon_path,
                                     run_afap=self.run_afap,
                                     allow_game_restart=True)

        # if trainer was loaded, select the level that was played the least
        if any(x != 0 for x in self.total_simulated_steps):
            game.select_level(np.argmin(self.total_simulated_steps).item())

        # init state
        # NOTE(review): np.bool is deprecated since NumPy 1.20 and removed
        # in 1.24; plain bool (or np.bool_) would be needed on newer NumPy.
        f, fc = np.zeros(game.frame_size,
                         dtype=np.bool), np.zeros(game.frame_size_cropped,
                                                  dtype=np.bool)
        # sf/sfc: frame stacks (full and cropped) fed to the network
        sf, sfc = torch.zeros((1, 4, *game.frame_size),
                              device=self.device), torch.zeros(
                                  (1, 4, *game.frame_size_cropped),
                                  device=self.device)
        t = True  # terminal flag

        # run warmup if necessary
        if self.iteration == 0:
            if os.path.exists('warmup_buffer.npz'):
                self.memory_buffer.load_warmup('warmup_buffer.npz')
            else:
                t = self.warmup(game, log_every)
                self.memory_buffer.save_warmup('warmup_buffer.npz')

        # training loop
        last_time = time()
        save_when_terminal = False
        while True:

            self.iteration += 1

            # disable noisy layers (eval mode makes them deterministic)
            if self.iteration == self.disable_noisy_after:
                self.net.eval()
                self.target_net.eval()

            # log
            if self.iteration % log_every == 0 and all(
                    len(l) > 0 for l in self.list_steps_alive):
                print(
                    f'{self.iteration} | '
                    f'{[round(np.mean(np.array(l[-100:])[:, 1]) / 60, 2) for l in self.list_steps_alive]}s | '
                    f'{[round(r[1] / 60, 2) for r in self.longest_run]}s | '
                    f'{self.total_simulated_steps} | '
                    f'{time() - last_time:.2f}s | '
                    f'{np.mean(self.losses[-log_every:])} | '
                    f'{np.mean(self.kls[-log_every:])} | '
                    f'{self.lr_scheduler.get_last_lr()[0]} | '
                    f'{game.level}')

            # indicate that the trainer should be saved the next time the agent dies
            if self.iteration % save_every == 0:
                save_when_terminal = True

            # update target net (lr decays on the same schedule)
            if self.iteration % self.update_target_net_every == 0:
                self.lr_scheduler.step()
                self.target_net.load_state_dict(self.net.state_dict())

            # if terminal
            if t:
                # select next level if this level was played at least as long as the previous level
                if self.total_simulated_steps[
                        game.level] > self.total_simulated_steps[game.level -
                                                                 1]:
                    game.select_level((game.level + 1) % 6)
                f, fc = game.reset()
                self.memory_buffer.insert_first(game.level, f, fc)
                sf.zero_()
                sfc.zero_()

            # update state: shift the frame stacks and insert the new frame
            sf[0, 1:] = sf[0, :-1].clone()
            sfc[0, 1:] = sfc[0, :-1].clone()
            sf[0, 0] = torch.from_numpy(f).to(self.device)
            sfc[0, 0] = torch.from_numpy(fc).to(self.device)

            # train
            if self.iteration % self.train_every == 0:
                loss, kl = self.train_batch()
                self.losses.append(loss)
                self.kls.append(kl)

            # act: greedy w.r.t. the expected value of the predicted
            # return distribution (distribution dot support)
            with torch.no_grad():
                self.net.reset_noise()
                a = (self.net(sf, sfc) *
                     self.support).sum(dim=2).argmax(dim=1).item()
            (f, fc), r, t = game.step(a)
            self.memory_buffer.insert(game.level, a, r, t, f, fc)

            # if terminal
            if t:
                if game.steps_alive > self.longest_run[game.level][1]:
                    self.longest_run[game.level] = (self.iteration,
                                                    game.steps_alive)
                self.list_steps_alive[game.level].append(
                    (self.iteration, game.steps_alive))
                self.total_simulated_steps[game.level] += game.simulated_steps
                self.times.append(time() - last_time)

                if save_when_terminal:
                    print('saving...')
                    # presumably keeps the (paused) game ticking around the
                    # blocking save — TODO confirm against game interface
                    for _ in range(60):
                        game.game.step(False)
                    self.save(save_name)
                    for _ in range(60):
                        game.game.step(False)
                    save_when_terminal = False

    def train_batch(self):
        """Sample a minibatch and perform one C51 update.

        Projects the Bellman-updated target distribution onto the fixed
        support, minimizes cross-entropy against the online net's
        log-distribution, and returns (loss, kl) as Python floats.
        """

        # sample minibatch
        f, fc, a, r, t, f1, fc1 = self.memory_buffer.make_batch(
            self.batch_size)

        # compute target q distribution
        with torch.no_grad():
            self.target_net.reset_noise()
            qdn = self.target_net(f1, fc1)
            an = (qdn * self.support).sum(dim=2).argmax(dim=1)

        # Bellman-updated support, clamped to the valid value range
        # (terminal transitions keep only the reward)
        Tz = (r.unsqueeze(1) +
              t.logical_not().unsqueeze(1) * self.gamma * self.support).clamp_(
                  self.v_min, self.v_max)
        # fractional atom index of each updated support point
        b = (Tz - self.v_min) / self.delta_z
        l = b.floor().long()
        u = b.ceil().long()

        # when b is integral l == u; shift one side so no mass is dropped
        l[(u > 0) & (l == u)] -= 1
        u[(l == u)] += 1

        # distribution of the target net's argmax action
        vdn = qdn.gather(
            1,
            an.view(-1, 1,
                    1).expand(self.batch_size, -1,
                              self.n_atoms)).view(self.batch_size,
                                                  self.n_atoms)
        # project the target distribution onto the fixed support (in place)
        self.m.zero_()
        self.m.view(-1).index_add_(0, (l + self.offset).view(-1),
                                   (vdn * (u - b)).view(-1))
        self.m.view(-1).index_add_(0, (u + self.offset).view(-1),
                                   (vdn * (b - l)).view(-1))

        # forward and backward pass: cross-entropy between the projected
        # target (self.m) and the online net's log-probabilities
        qld = self.net(f, fc, log=True)
        vld = qld.gather(
            1,
            a.view(-1, 1,
                   1).expand(self.batch_size, -1,
                             self.n_atoms)).view(self.batch_size, self.n_atoms)
        loss = -torch.sum(self.m * vld, dim=1).mean()

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # KL between target and prediction, for logging only
        kl = F.kl_div(vld.detach(), self.m, reduction='batchmean')
        return loss.detach().item(), kl.item()

    def save(self, file_name='trainer'):
        """Pickle this trainer to `file_name`, keeping the previous save
        as a backup until the new one has been written successfully."""

        # first backup the last save file
        # in case anything goes wrong
        file_name_backup = file_name + '_backup'
        if os.path.exists(file_name):
            os.rename(file_name, file_name_backup)

        # save this object
        with open(file_name, 'wb') as f:
            pickle.dump(self, f)

        # remove backup if nothing went wrong
        if os.path.exists(file_name_backup):
            os.remove(file_name_backup)

    @staticmethod
    def load(file_name='trainer'):
        """Unpickle a saved trainer.

        Only load files you trust: pickle can execute arbitrary code.
        """
        with open(file_name, 'rb') as f:
            ret = pickle.load(f)
            # a consistent save only happens right after a death
            assert ret.memory_buffer.last_was_terminal
            return ret
Exemple #10
0
    def __init__(
            self,
            capacity_per_level=500000,
            warmup_steps=100000,
            n_frames=4,
            n_atoms=51,
            v_min=-1,
            v_max=0,
            gamma=.99,
            device='cuda',
            batch_size=48,
            lr=0.0000625 * 2,
            lr_decay=0.99,
            update_target_net_every=25000,
            train_every=6,
            frame_skip=4,
            disable_noisy_after=2000000,
            super_hexagon_path='C:\\Program Files (x86)\\Steam\\steamapps\\common\\Super Hexagon\\superhexagon.exe',
            v_max bound the value-distribution support; n_atoms atoms lie
            run_afap=True):
        """Create all state for a distributional (C51) DQN trainer:
        replay memory, online/target networks, optimizer, lr schedule
        and per-level bookkeeping.

        v_min/v_max bound the value-distribution support and n_atoms is
        the number of atoms on it.
        """

        # training objects
        self.memory_buffer = MemoryBuffer(
            capacity_per_level,
            SuperHexagonInterface.n_levels,
            n_frames,
            SuperHexagonInterface.frame_size,
            SuperHexagonInterface.frame_size_cropped,
            gamma,
            device=device)
        self.net = Network(n_frames, SuperHexagonInterface.n_actions,
                           n_atoms).to(device)
        self.target_net = Network(n_frames, SuperHexagonInterface.n_actions,
                                  n_atoms).to(device)
        self.target_net.load_state_dict(self.net.state_dict())
        self.optimizer = torch.optim.Adam(self.net.parameters(),
                                          lr=lr,
                                          eps=1.5e-4)
        self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer, ExpLrDecay(lr_decay, min_factor=.1))

        # parameters
        self.batch_size = batch_size
        self.update_target_net_every = update_target_net_every
        self.train_every = train_every
        self.frame_skip = frame_skip
        self.disable_noisy_after = disable_noisy_after
        self.warmup_steps = warmup_steps
        self.gamma = gamma
        self.device = device

        # parameters for the distributional (C51) head
        self.n_atoms = n_atoms
        self.v_min = v_min
        self.v_max = v_max
        self.delta_z = (v_max - v_min) / (n_atoms - 1)  # atom spacing
        self.support = torch.linspace(v_min,
                                      v_max,
                                      n_atoms,
                                      dtype=torch.float,
                                      device=device)
        # flat-index row offsets used when scattering into self.m
        self.offset = torch.arange(0,
                                   batch_size * n_atoms,
                                   n_atoms,
                                   device=device).view(-1, 1)
        # reusable buffer for the projected target distribution
        self.m = torch.empty((batch_size, n_atoms), device=device)

        # debug and logging stuff
        self.list_steps_alive = [[]
                                 for _ in range(SuperHexagonInterface.n_levels)
                                 ]
        self.longest_run = [(0, 0)] * SuperHexagonInterface.n_levels
        self.total_simulated_steps = [0] * SuperHexagonInterface.n_levels
        self.losses = []
        self.kls = []
        self.times = []
        self.iteration = 0

        self.super_hexagon_path = super_hexagon_path
        self.run_afap = run_afap
Exemple #11
0
    # evaluation setup (body of an enclosing function not visible here)
    net_path = 'super_hexagon_net'

    n_frames = 4
    frame_skip = 4
    log_every = 1000
    n_atoms = 51

    # setup
    # NOTE(review): np.bool is deprecated since NumPy 1.20 and removed in
    # 1.24; plain bool (or np.bool_) would be needed on newer NumPy.
    fp, fcp = np.zeros(
        (1, n_frames, *SuperHexagonInterface.frame_size),
        dtype=np.bool), np.zeros(
            (1, n_frames, *SuperHexagonInterface.frame_size_cropped),
            dtype=np.bool)
    # fixed C51 support; presumably matches the trained net's
    # v_min=-1 / v_max=0 — confirm against the training config
    support = np.linspace(-1, 0, n_atoms)

    net = Network(n_frames, SuperHexagonInterface.n_actions,
                  n_atoms).to(device)
    net.load_state_dict(torch.load(net_path, map_location=device))
    net.eval()

    game = SuperHexagonInterface(frame_skip=frame_skip, run_afap=False)
    game.select_level(level)

    list_times_alive = []
    f, fc = game.reset()

    # helper function
    def to_torch_tensor(x):
        """Move a numpy array to `device` as a float tensor."""
        return torch.from_numpy(x).to(device).float()

    # global no_grad: evaluation only, gradients never needed
    torch.set_grad_enabled(False)
Exemple #12
0
    def __init__(self, graph_args, adv_args, in_shape):
        """Build a TF1 actor-critic graph plus an auxiliary
        action-prediction (inverse-dynamics-style) network.

        graph_args -- dict of architecture/training hyperparameters
        adv_args   -- dict with advantage-estimation params (gamma)
        in_shape   -- placeholder shape for observations
        """
        # arg unpacking
        self.act_dim = graph_args['act_dim']
        ## conv operations params
        n_hidden = graph_args['n_hidden']
        hid_size = graph_args['hid_size']
        conv_depth = graph_args['conv_depth']

        ## training params
        self.learning_rate = graph_args['learning_rate']
        self.num_target_updates = graph_args['num_target_updates']
        self.num_grad_steps_per_target_update = graph_args[
            'num_grad_steps_per_target_update']
        self.gamma = adv_args['gamma']

        # class similar actions => easier to predict
        self.setup_action_classes()

        self.act, self.adv = self.define_placeholders()
        self.obs = tf.placeholder(shape=in_shape, dtype=tf.float32)
        self.n_obs = tf.placeholder(shape=in_shape, dtype=tf.float32)

        # policy / actor evaluation with encoded state
        # 'policy_start' is shared (reuse=True) so obs and n_obs are
        # encoded by the same weights
        self.half_policy_distrib = Network(self.obs, None, 'policy_start', \
            hid_size, conv_depth)
        self.half_policy_distrib_2 = Network(self.n_obs, None, 'policy_start', \
            hid_size, conv_depth, reuse=True)
        self.policy_distrib = Network(self.half_policy_distrib, self.act_dim,  \
            'policy_out', hid_size, n_hidden_dense=n_hidden)

        self.greedy_action = tf.argmax(self.policy_distrib, axis=1)

        self.n_act_sample = 1
        self.sample_action = tf.random.multinomial(
            tf.nn.softmax(self.policy_distrib), self.n_act_sample)

        # policy update: policy-gradient loss with a small
        # (-1e-3 * logprob) regularization term
        action_enc = tf.one_hot(self.act, depth=self.act_dim)
        self.logprob = -1 * tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=self.policy_distrib, labels=action_enc)
        self.actor_loss = -tf.reduce_mean(self.logprob * self.adv -
                                          1e-3 * self.logprob)
        actor_optim = tf.train.AdamOptimizer(self.learning_rate)
        self.actor_update_op = actor_optim.minimize(self.actor_loss)

        # record gradients
        # NOTE(review): compute_gradients returns (gradient, variable)
        # pairs; the histogram is fed the whole pair — grad[0] was
        # probably intended. Confirm before relying on these summaries.
        self.grads = actor_optim.compute_gradients(self.actor_loss)
        for grad in self.grads:
            tf.summary.histogram("{}-grad".format(grad[1].name), grad)
        self.merged = tf.summary.merge_all()

        # critic definition with encoded state
        self.v_target = tf.placeholder(shape=(None, ),
                                       name='v_target',
                                       dtype=tf.float32)
        self.v_pred = tf.squeeze(
            Network(self.obs,
                    1,
                    'critic',
                    hid_size,
                    conv_depth=conv_depth,
                    n_hidden_dense=n_hidden))
        self.critic_loss = tf.losses.mean_squared_error(
            self.v_target, self.v_pred)
        self.critic_update_op = tf.train.AdamOptimizer(
            self.learning_rate).minimize(self.critic_loss)

        # action neural network def
        actnn_layers = graph_args['actnn_layers']
        actnn_units = graph_args['actnn_units']
        self.actnn_learning_rate = graph_args['actnn_learning_rate']
        self.nclasses = graph_args['actnn_nclasses']

        # placeholders act_i, obs_i, obs_i+1 (the obs "placeholders" are
        # actually the shared policy encodings from above)
        self.prev_act_ph = tf.placeholder(shape=(None, ), dtype=tf.int32)
        self.actnn_prev_obs_ph = self.half_policy_distrib
        self.actnn_obs_ph = self.half_policy_distrib_2

        # concat & network pass
        multi_obs_enc = tf.concat([self.actnn_prev_obs_ph, self.actnn_obs_ph],
                                  axis=-1)
        self.actnn_pred = dense_pass(multi_obs_enc, self.nclasses,
                                     actnn_layers, actnn_units)
        # NOTE(review): rebinding action_enc shadows the policy one-hot
        # above — confirm this is intentional.
        action_enc = tf.one_hot(self.prev_act_ph, depth=self.nclasses)

        # update operations
        self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(
            logits=self.actnn_pred, labels=action_enc)
        self.train_step = tf.train.AdamOptimizer(
            self.actnn_learning_rate).minimize(self.loss)
Exemple #13
0
def main():
    """
    The main part of this program.
    Starts all worker threads (LED status monitor, GoPro, game logger,
    game controller, network) and then monitors them together with the
    config file, hot-reloading the config when its mtime changes.
    All necessary threads are started and monitored.
    :return:
    """
    # remember the modification timestamp of the config; if the config gets changed, we can reload it!
    config_timestamp = 0 if not args.config else os.stat(
        importlib.util.find_spec("config").origin).st_mtime

    led = LedStatusMonitor()
    led.start()

    gopro = GoPro(args.background or args.quiet, args.ignore, args.max_time,
                  args.log_invisible)
    if args.config:
        # only forward settings that are actually present in the config
        gopro.setUserSettings({
            'FRAME_RATE':
            config.fps if 'fps' in vars(config) else None,
            'FOV':
            config.fov if 'fov' in vars(config) else None,
            'RESOLUTION':
            config.resolution if 'resolution' in vars(config) else None,
        })
    gopro.start()

    teams = config.teams if args.config and 'teams' in vars(config) else None
    #gameLogger = GameLoggerSql(os.path.join(os.path.dirname(__file__), 'logs/game.db'), teams)
    gameLogger = GameLoggerLog(
        os.path.join(os.path.dirname(__file__), 'logs/'), teams,
        args.log_invisible)
    gameLogger.start()

    gameController = GameController(args.gc_source)
    gameController.start()

    network = Network(args.device, args.ssid, args.passwd, args.retries,
                      args.mac)
    network.start()

    # monitor threads and config
    threads = [led, gopro, gameLogger, gameController, network]
    try:
        while True:
            #print(blackboard)
            # if config was loaded from file and file was modified since last checked
            if args.config and config_timestamp != os.stat(
                    importlib.util.find_spec("config").origin).st_mtime:
                config_timestamp = os.stat(
                    importlib.util.find_spec("config").origin).st_mtime
                try:
                    # reload config from file
                    importlib.reload(config)
                    Logger.info("Reloaded modified config")
                    network.setConfig(None, config.ssid, config.passwd,
                                      config.retries, config.mac)
                    gameController.setSource(config.gc_source)
                    gopro.setUserSettings({
                        'FRAME_RATE':
                        config.fps if 'fps' in vars(config) else None,
                        'FOV':
                        config.fov if 'fov' in vars(config) else None,
                        'RESOLUTION':
                        config.resolution
                        if 'resolution' in vars(config) else None,
                    })
                except Exception as e:
                    Logger.error("Invalid config! " + str(e))
            else:
                # poll once a second and warn about dead threads
                time.sleep(1)
                for t in threads:
                    if not t.is_alive():
                        Logger.error("Thread %s is not running (anymore)!",
                                     str(t.__class__.__name__))
    except (KeyboardInterrupt, SystemExit):
        print("Shutting down ...")

    # cancel threads
    led.cancel()
    gopro.cancel()
    gameLogger.cancel()
    gameController.cancel()
    network.cancel()
    # wait for finished threads
    led.join()
    gopro.join()
    gameLogger.join()
    gameController.join()
    network.join()

    print("Bye")
Exemple #14
0
        self.batch_rewards = None


if __name__ == '__main__':
    # Train a policy-gradient agent on a Gym environment from the CLI.
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--environment", help="Gym environment to train on", default='CartPole-v0')
    parser.add_argument("-hi", "--hidden_size", help="Hidden layer sizes, separated by whitespace", default='16')
    # BUG FIX: without type=int a CLI-supplied value stayed a string
    parser.add_argument("-t", "--training_episodes", help="Maximum number of episodes to train", type=int, default=100000)
    parser.add_argument("-a", "--alpha", help="Step size", type=float, default=.003)
    parser.add_argument("-g", "--goal_return", help="Goal return", type=float, default=195)
    parser.add_argument("-b", "--batch_size", help="Episodes per gradient update", type=int, default=100)
    args = parser.parse_args()

    # "16 32" -> [16, 32]
    args.hidden_size = [int(val) for val in args.hidden_size.split()]

    env = gym.make(args.environment)
    use_cuda = torch.cuda.is_available()

    # network input shape: leading channel dim, last observation dim dropped
    state_shape  = list(env.reset().shape)
    state_shape  = [1] + state_shape[:-1]
    action_count = np.product(env.action_space.shape)
    network      = Network(state_shape, args.hidden_size, action_count)

    # BUG FIX: batch_size was hard-coded to 1, silently ignoring the
    # -b/--batch_size argument parsed above.
    agent = PGAgent(env, network, batch_size=args.batch_size, alpha=args.alpha, use_cuda=use_cuda)
    _time = time()
    converged = agent.train(args.training_episodes, goal_return=args.goal_return, smoothing_eps=100)
    if converged:
        print("Solved in %.1f minutes" % ((time() - _time) / 60.0))
    else:
        print("Failed to converge")
Exemple #15
0
            value_loss.backward()
            self.value_opt.step()


if __name__ == '__main__':
    # experiment configuration
    ENVIRONMENT   = 'Pong-ram-v0'
    CONV_LAYERS   = None  # e.g. [(1, 8, 3, True), (8, 8, 3, True), (8, 8, 3, True), (8, 8, 3, True)]
    HIDDEN_LAYERS = [256]
    TRAIN_EPS     = 1000 * 1000 * 1000

    env      = gym.make(ENVIRONMENT)
    use_cuda = torch.cuda.is_available()

    # network input shape: leading channel dim, last observation dim dropped
    state_shape  = [1] + list(env.reset().shape)[:-1]
    action_count = np.product(env.action_space.shape)

    # actor-critic: separate policy and value networks over the same shape
    policy_network = Network(state_shape, HIDDEN_LAYERS, action_count, conv=CONV_LAYERS)
    value_network  = Network(state_shape, HIDDEN_LAYERS, 1, conv=CONV_LAYERS, softmax=False)

    agent = ACAgent(env,
                    policy_network,
                    value_network,
                    alpha=.003,
                    gamma=.99,
                    memory_size=10000,
                    batch_size=1024,
                    use_cuda=use_cuda)

    started = time()
    agent.train(TRAIN_EPS, goal_return=15, smoothing_eps=1)
    print("Solved in %.1f minutes" % ((time() - started) / 60.0))
Exemple #16
0
import copy

from PIL import Image
from torchvision.transforms.functional import to_tensor
import numpy as np
from config import *
from utils import prepare_input, Network
from matplotlib import pyplot as plt
import matplotlib.patches as patches
import sys

if __name__ == '__main__':
    # usage: script.py <path-to-28x28-grayscale-image>
    assert len(sys.argv) > 1

    model = Network().double().to(device)
    data: dict = torch.load(model_filename)
    model.load_state_dict(data['model_state_dict'])
    # normalization statistics saved alongside the weights
    mean = data['mean']
    std = data['std']
    img = np.array(Image.open(sys.argv[1]).convert('L'))
    assert img.shape == (28, 28)
    image_orig = copy.deepcopy(img)
    img, boundaries = prepare_input((img - mean) / std)

    with torch.no_grad():
        model.train(False)
        res = model(to_tensor(img).to(device).view(1, *img.shape))
        # k and n come from `config` (star-imported above); res appears to
        # pack k weights followed by k*n component values, de-normalized
        # back to pixel space — confirm against the model definition
        a = (res[:, k:k + k * n].reshape(k, n) * std + mean).cpu().data.numpy()
        p = res[:, :k].reshape(-1).cpu().data.numpy()

        # expectation over the k components weighted by p
        h = sum(p[i] * a[i] for i in range(k))
Exemple #17
0
class MainApp (QWidget):
    
    def __init__ (self):
        ''' Non-modular constructor: builds the UI, then delegates to the setup* helpers. '''
        # Parent-class constructor:
        super().__init__()
        self.ui = Ui_mainWindow()
        self.ui.setupUi(self)
        self.show()
        # Program parameters (controller state, constants):
        self.setupParams()
        # Control-chain components (network / plant):
        self.setupControl()
        # Periodic callbacks (QTimer-driven control loop):
        self.setupCallbacks()
        # PyQt widget signal/slot wiring:
        self.setupWidgets()        
    
    def setupWidgets (self):
        ''' Wire up the PyQt widgets (signal/slot connections). '''

        def on_dial_changed():
            # Map the dial position (0..100) onto a 0..2*pi reference setpoint.
            self.rk = 6.283 * self.ui.dial.value() / 100.0
            self.ui.labelReferencia.setText ("{:.6f}".format(self.rk))

        def on_backlash_clicked(upDown):
            # Nudge the estimated backlash one step up or down, within bounds.
            if upDown > 0 and self.alpha < self.BACKLASH_MAX:
                self.alpha += self.BACKLASH_STEP   # one step per click
                self.ui.labelBacklashEstimado.setText("{:.6f}".format(self.alpha))
            elif upDown < 0 and self.alpha >= self.BACKLASH_STEP:
                self.alpha -= self.BACKLASH_STEP   # one step per click
                self.ui.labelBacklashEstimado.setText("{:.6f}".format(self.alpha))
            print (self.alpha)
            # Propagate the new estimate to the network and the plant model.
            self.network.alpha = self.alpha
            self.plant.alpha = self.alpha

        def on_feedback_toggled():
            # Toggle the closed-loop state.
            self.feedbackLock = not self.feedbackLock
            # When the loop is opened, reset controller memory and the plant.
            if not self.feedbackLock:
                self.ek1 = 0.
                self.uk1 = 0.
                self.feedbackTime = time.time()
                self.plant = Plant (self.alpha)
            # Show current state:
            print (self.feedbackLock)

        # Connect slots:
        self.ui.btnBacklashUp.clicked.connect (lambda : on_backlash_clicked (1))
        self.ui.btnBacklashDown.clicked.connect (lambda : on_backlash_clicked (-1))
        self.ui.dial.valueChanged.connect (on_dial_changed)
        self.ui.checkBoxMalha.stateChanged.connect (on_feedback_toggled)
        
            
    def setupParams (self):
        ''' Initialise program parameters: controller state, constants and plot buffers. '''
        self.feedbackLock = False           # closed loop disabled at start
        self.simulTime = 0.5                # plotted window length [s]
        self.initialTime = time.time()
        self.Ts = 0.0125                    # sampling period [s]
        self.rk = 0.                        # reference setpoint
        self.yk = 0.                        # plant output
        self.alpha = 0.                     # estimated backlash
        # Discrete controller coefficients:
        self.cek = 57.
        self.cek1 = -38.98
        self.cuk1 = 0.3679
        # Previous error / control sample (difference-equation memory):
        self.ek1 = 0.
        self.uk1 = 0.
        # Backlash estimate bound and per-click adjustment step:
        self.BACKLASH_MAX = 0.1
        self.BACKLASH_STEP = 5.3267e-4
        # Plot buffers for reference and plant output:
        self.rPlot = []
        self.yPlot = []
        
        
    def setupControl(self):
        ''' Create the control-chain blocks: neural network and plant model.

        Hardware I/O and the backlash-regulator blocks are currently disabled.
        '''
        self.network = Network(self.alpha)
        self.plant = Plant(self.alpha)
        
    def setupCallbacks(self):
        ''' Start the periodic callbacks (control step + plot refresh) on a QTimer. '''
        self.timer = QTimer(self)
        # Both tasks fire on every timer tick:
        self.timer.timeout.connect(self.feedbackSystem)
        self.timer.timeout.connect(self.updateGraphics)
        # QTimer.start takes milliseconds; Ts is in seconds.
        self.timer.start(int(self.Ts * 1000))
    
    def feedbackSystem (self):
        ''' One closed-loop control step: difference-equation law + neural compensation. '''
        if not self.feedbackLock:
            return  # loop open: nothing to do this tick
        # Keep the network's backlash estimate current:
        self.network.alpha = self.alpha
        # Read the plant output and evaluate the control law:
        self.yk = self.plant.yk
        error = self.rk - self.yk
        control = (self.cek * error) + (self.cek1 * self.ek1) + (self.cuk1 * self.uk1)
        # Compensate the control signal through the neural network (or bypass it):
        features = np.reshape([control, self.uk1], (1, 2))
        if self.ui.actionRede_neural.isChecked():
            compensated = self.network.apply(features)
        else:
            compensated = self.network.bypass(features)
        # Drive the plant:
        self.plant.apply(compensated)
        # Shift the delayed signals for the next step:
        self.uk1 = control
        self.ek1 = error
        
    def updateGraphics (self):
        ''' Buffer one sample and redraw the plot once a full window is collected. '''
        self.rPlot.append (self.rk)
        self.yPlot.append (self.yk)
        
        # Redraw only when one window (simulTime / Ts samples) has accumulated:
        if (len (self.yPlot) >= self.simulTime / self.Ts):
            
            t = np.linspace (0, self.simulTime, len (self.yPlot))
           
            self.ui.MplWidget.canvas.axes.clear()
            self.ui.MplWidget.canvas.axes.grid(True)
            
            # Reference and plant output as step plots:
            self.ui.MplWidget.canvas.axes.step(t, self.rPlot)
            self.ui.MplWidget.canvas.axes.step(t, self.yPlot)
            
            self.ui.MplWidget.canvas.axes.legend(('referência', 'planta'))
        
            self.ui.MplWidget.canvas.draw()
            
            # Start buffering the next window:
            self.rPlot = []
            self.yPlot = []
            
            
        ''' Demorado 
Exemple #18
0
    np.random.seed(random_seed)
    np.random.shuffle(indices)
# Shuffled indices split into disjoint train / validation subsets.
train_indices, val_indices = indices[split:], indices[:split]

# Samplers draw their subset of indices from the same underlying dataset.
train_sampler: SubsetRandomSampler = SubsetRandomSampler(train_indices)
test_sampler: SubsetRandomSampler = SubsetRandomSampler(val_indices)

my_set = ImageSet(images, images_orig, boundaries)
train_loader: DataLoader = DataLoader(my_set,
                                      batch_size=batch_size,
                                      sampler=train_sampler)
val_loader: DataLoader = DataLoader(my_set,
                                    batch_size=batch_size,
                                    sampler=test_sampler)

# NOTE(review): batch_size / lr / epoch / device presumably come from a
# star-imported config module — confirm against the file header.
model: Network = Network().double().to(device)
optimizer = Adam(model.parameters(), lr)

# Track the best (lowest validation loss) snapshot of the model.
min_loss = float('inf')
best_model = copy.deepcopy(model)
for e in range(epoch):
    train_loss = 0
    val_loss = 0
    model.train(True)  # training mode (dropout/batch-norm active)
    for x, orig, bound in tqdm(train_loader,
                               desc='Training: ',
                               position=0,
                               leave=True):
        x = x.to(device)
        orig = orig.to(device)
        optimizer.zero_grad()
Exemple #19
0
 def setupControl(self):
     ''' Inicializa blocos de controle: '''
     self.network = Network(self.alpha)
     self.plant = Plant (self.alpha)
def center():
    """Run the authentication center: accept node connections and answer
    nonce-based login requests.

    Reads the center address and the registered nodes from
    ``config/center.ini`` and listens forever. Per-message protocol
    (each message is a stringified tuple):
      * ``(0, ip, port)``            -> issue a fresh 8-digit nonce
      * ``('login', user, hash, i)`` -> verify hash == hash256(password + nonce_i)
    """
    # Center address plus the list of registered nodes (username first).
    IP_center, PORT_center, nodes = net.getAddressCenter(
        'config/center.ini')  # get informations of center and nodes
    idList = [node[0] for node in nodes]  # usernames of registered users

    nonceList = []  # issued nonces; the protocol indexes them 1-based

    try:  # listen for others' login connections
        sock1 = net.sk_listen(IP_center, PORT_center)
    except OSError:
        # BUGFIX: the original bare `except:` printed and fell through to
        # sock1.accept() with `sock1` unbound (NameError). Without a
        # listening socket the center cannot run, so abort instead.
        print("Connection fail")
        return

    while True:
        flag_addr = True
        connection1, IP_address = sock1.accept()
        # A node connected: verify the username and password hash.
        while True:
            # Receive login data from the node.
            data = connection1.recv(1024)
            # SECURITY: eval() on data received from the network executes
            # arbitrary code — replace with ast.literal_eval (or JSON).
            datatuple = tuple(eval(data.decode('utf-8')))
            # The first well-formed message carries the node's reply address.
            # NOTE(review): IP_user/PORT_user stay unbound if a 'login'
            # message arrives first — pre-existing behavior, confirm protocol.
            if flag_addr and isinstance(datatuple[2], int):
                IP_user = datatuple[1]
                PORT_user = datatuple[2]
                flag_addr = False
            if datatuple[0] == 0:
                # Send a fresh nonce to the user for the login challenge.
                nonce = str(random.randint(0, 99999999)).zfill(8)
                nonceList.append(nonce)
                nonceback = ('nonce', nonce, len(nonceList)).__str__()
                net.sendback(nonceback, IP_user, PORT_user)
            elif datatuple[0] == 'login':
                # Verify the received username and hash value.
                if datatuple[1] not in idList:
                    feedback = ("noid", ).__str__()
                    net.sendback(feedback, IP_user, PORT_user)
                else:
                    password, ip, port = net.getUserinfo(datatuple[1])
                    if datatuple[2] == hash256(password +
                                               nonceList[datatuple[3] - 1]):
                        tuplefeedback = ('loginok', idList)
                        feedback = tuplefeedback.__str__()
                        net.sendback(feedback, ip, port)
                    else:
                        feedback = ('loginfail', ).__str__()
                        net.sendback(feedback, ip, port)
            break
Exemple #21
0
class RegisterScreen(Screen):
    """Account-creation screen.

    Collects username, password and email, sends an asynchronous register
    request through `Network`, and renders the request's outcome.
    """

    def __init__(self, pg_screen, screen_size):
        Screen.__init__(self, pg_screen, screen_size)
        self.pg_screen = pg_screen
        self.screen_size = screen_size
        self.network = Network()
        self.font = Font(None, 30)
        self.registering = False        # True while a register request is in flight
        self.registering_status = None  # str or list of lines shown to the user
        self.username = None

        # load the assets
        self.buttons = load("assets/buttons.png").convert_alpha()

        # create the input boxes
        self.username_box = InputBox((10, 90), (300, 40))
        self.password_box = InputBox((10, 170), (300, 40), type="password")
        self.email_box = InputBox((10, 250), (300, 40))

    def update(self, delta_time):
        """Poll the pending register request and map its HTTP status to a message."""
        if self.registering:
            # BUGFIX: fetch the status once and reuse it. The original called
            # get_register_status() twice (once just for the debug print), so
            # the printed status could differ from the one acted upon.
            registering_result = self.network.get_register_status()
            status_code = registering_result.status_code
            print(status_code)
            if status_code == 200:
                # successfully registered, show the user
                self.registering = False
                self.registering_status = [
                    f"{self.username}",
                    "Successfully registered!",
                    "You can login now...",
                ]
            elif status_code == 400:
                # invalid username/password/email supplied
                self.registering_status = [
                    "Invalid information supplied!",
                    "Username/password must have",
                    "5 characters or more,",
                    "email must be valid",
                ]
                self.registering = False
            elif status_code == 409:
                # username already exists
                self.registering_status = [
                    "Username already exists!",
                    "Try a different username",
                ]
                self.registering = False
            elif status_code != 0:
                # some unknown error occured
                self.registering_status = f"ERROR: status code: {status_code}"
                self.registering = False

    def draw(self):
        """Draw the form, the buttons and the current status text."""
        Screen.draw(self)

        self._draw_centered_text("Register to Mr Nom", (160, 30))
        self._draw_left_align_text("Username", (10, 60))
        self.username_box.draw(self.pg_screen)
        self._draw_left_align_text("Password", (10, 140))
        self.password_box.draw(self.pg_screen)
        self._draw_left_align_text("Email", (10, 220))
        self.email_box.draw(self.pg_screen)
        self._draw_centered_text("REGISTER", (160, 330))

        self.pg_screen.blit(self.buttons, (0, 415), (64, 64, 64, 64))

        # draw text last, so it overlaps everything (if overlapping happens)
        if self.registering_status:
            if isinstance(self.registering_status, list):  # multi-line status
                offset = 370
                for line in self.registering_status:
                    self._draw_centered_text(line, (160, offset))
                    offset += 25
            else:  # string
                self._draw_centered_text(self.registering_status, (160, 370))

    def _draw_centered_text(self, text, center_coord, color=(0, 0, 0)):
        """Render `text` and blit it centred on `center_coord`."""
        surface = self.font.render(text, True, color)
        rect = surface.get_rect()
        rect.center = center_coord
        self.pg_screen.blit(surface, rect)

    def _draw_left_align_text(self, text, top_left, color=(0, 0, 0)):
        """Render `text` and blit it with its top-left corner at `top_left`."""
        surface = self.font.render(text, True, color)
        rect = surface.get_rect()
        rect.topleft = top_left
        self.pg_screen.blit(surface, rect)

    def mouse_down(self, pos):
        """Forward the click to the input boxes and the back/register buttons."""
        self.username_box.mouse_down(pos)
        self.password_box.mouse_down(pos)
        self.email_box.mouse_down(pos)

        if self.pos_between(pos, (0, 415), (64, 479)):
            # back button: clear the form and return to the main menu
            self.reset()
            return {"screen": "main_menu", "play_sound": "click"}
        elif self.pos_between(pos, (100, 311), (220, 339)):
            # REGISTER label: fire the request once; ignore clicks while pending
            if not self.registering:
                # set the screen properties
                self.registering = True
                self.registering_status = "Registering..."
                self.username = self.username_box.text
                print("registering...")

                # send registering request
                username = self.username_box.text
                password = self.password_box.text
                email = self.email_box.text
                self.network.perform_register(username, password, email)

    def key_press(self, event):
        """Route key events to all three input boxes."""
        self.username_box.key_press(event)
        self.password_box.key_press(event)
        self.email_box.key_press(event)

    def reset(self):
        """Clear the inputs and any pending state (called on leaving the screen)."""
        self.username_box.reset()
        self.password_box.reset()
        self.email_box.reset()
        self.registering = False
        self.registering_status = None
Exemple #22
0
class LoginScreen(Screen):
    """Login screen.

    Collects credentials, sends an asynchronous login request through
    `Network`, and caches the access token and username on success.
    """

    def __init__(self, pg_screen, screen_size):
        Screen.__init__(self, pg_screen, screen_size)
        self.pg_screen = pg_screen
        self.screen_size = screen_size
        self.network = Network()
        self.font = Font(None, 30)
        self.logging_in = False        # True while a login request is in flight
        self.logging_in_status = None  # str or list of lines shown to the user
        self.username = None

        # load the assets
        self.buttons = load("assets/buttons.png").convert_alpha()

        # create the input boxes
        self.username_box = InputBox((10, 150), (300, 40))
        self.password_box = InputBox((10, 230), (300, 40), type="password")

    def update(self, delta_time):
        """Poll the pending login request and map its HTTP status to a message."""
        if self.logging_in:
            # BUGFIX: fetch the result once and reuse it. The original called
            # get_login_result() twice (once just for the debug print), so the
            # printed status could differ from the one acted upon.
            logging_in_result = self.network.get_login_result()
            status_code = logging_in_result.status_code
            print(status_code)
            if status_code == 200:
                # put the access_token and username in the network _cache
                access_token = logging_in_result.data.get("access_token")
                self.network._cache["access_token"] = access_token
                self.network._cache["username"] = self.username
                self.logging_in = False
                self.logging_in_status = [
                    f"{self.username}",
                    "Successfully logged in!",
                ]
            elif status_code == 401:
                # non-existing user supplied
                self.logging_in_status = [
                    "Supplied username and",
                    "password not found!",
                ]
                self.logging_in = False
            elif status_code != 0:
                # some unknown error occured
                self.logging_in_status = f"ERROR: status code: {status_code}"
                self.logging_in = False

    def draw(self):
        """Draw the form, the buttons and the current status text."""
        Screen.draw(self)

        self._draw_centered_text("Login to Mr Nom", (160, 60))
        self._draw_left_align_text("Username", (10, 120))
        self.username_box.draw(self.pg_screen)
        self._draw_left_align_text("Password", (10, 200))
        self.password_box.draw(self.pg_screen)
        self._draw_centered_text("LOGIN", (160, 310))

        self.pg_screen.blit(self.buttons, (0, 415), (64, 64, 64, 64))

        # draw text last, so it overlaps everything (if overlapping happens)
        if self.logging_in_status:
            if isinstance(self.logging_in_status, list):  # multi-line status
                offset = 350
                for line in self.logging_in_status:
                    self._draw_centered_text(line, (160, offset))
                    offset += 25
            else:  # string
                self._draw_centered_text(self.logging_in_status, (160, 350))

    def _draw_centered_text(self, text, center_coord, color=(0, 0, 0)):
        """Render `text` and blit it centred on `center_coord`."""
        surface = self.font.render(text, True, color)
        rect = surface.get_rect()
        rect.center = center_coord
        self.pg_screen.blit(surface, rect)

    def _draw_left_align_text(self, text, top_left, color=(0, 0, 0)):
        """Render `text` and blit it with its top-left corner at `top_left`."""
        surface = self.font.render(text, True, color)
        rect = surface.get_rect()
        rect.topleft = top_left
        self.pg_screen.blit(surface, rect)

    def mouse_down(self, pos):
        """Forward the click to the input boxes and the back/login buttons."""
        self.username_box.mouse_down(pos)
        self.password_box.mouse_down(pos)

        if self.pos_between(pos, (0, 415), (64, 479)):
            # back button: clear the form and return to the main menu
            self.reset()
            return {"screen": "main_menu", "play_sound": "click"}
        elif self.pos_between(pos, (120, 291), (201, 320)):
            # LOGIN label: fire the request once; ignore clicks while pending
            if not self.logging_in:
                # set the screen properties
                self.logging_in = True
                self.logging_in_status = "Logging in..."
                self.username = self.username_box.text
                print("logging in...")

                # send login request
                username = self.username_box.text
                password = self.password_box.text
                self.network.perform_login(username, password)

    def key_press(self, event):
        """Route key events to both input boxes."""
        self.username_box.key_press(event)
        self.password_box.key_press(event)

    def reset(self):
        """Clear the inputs and any pending state (called on leaving the screen)."""
        self.username_box.reset()
        self.password_box.reset()
        self.logging_in = False
        self.logging_in_status = None
Exemple #23
0
class HighscoresScreen(Screen):
    def __init__(self, pg_screen, screen_size):
        """Set up network access, font, paging state and sprite-sheet assets."""
        Screen.__init__(self, pg_screen, screen_size)
        self.pg_screen = pg_screen
        self.screen_size = screen_size
        self.network = Network()
        self.font = Font(None, 30)

        # One fetch flag + page counter per highscore list (global / personal).
        self.fetch_global_highscores = True
        self.fetch_personal_highscores = True
        self.global_highscores_page = 0
        self.personal_highscores_page = 0
        self.highscore_page_idx = 0  # 0 = global view, 1 = personal view

        # load the assets
        self.mainmenu = load("assets/mainmenu.png").convert_alpha()
        self.numbers = load("assets/numbers.png").convert_alpha()
        self.buttons = load("assets/buttons.png").convert_alpha()

    def update(self, delta_time):
        """Kick off a one-shot highscore fetch for the page being viewed."""
        wants_global = self.highscore_page_idx == 0 and self.fetch_global_highscores
        if wants_global:
            self.fetch_global_highscores = False  # only fetch once per visit
            result = self.network.fetch_global_highscores()
            if not result["result"]:
                print(result["status"])
            print(result)

        wants_personal = self.highscore_page_idx == 1 and self.fetch_personal_highscores
        if wants_personal:
            self.fetch_personal_highscores = False  # only fetch once per visit
            result = self.network.fetch_local_highscores()
            if not result["result"]:
                print(result["status"])
            print(result)

    def draw(self):
        """Render the highscore screen for the currently selected page."""
        Screen.draw(self)

        # Title: which highscore list is being shown (same render path as the
        # original inline font.render/center/blit sequence).
        title = "GLOBAL" if self.highscore_page_idx == 0 else "PERSONAL"
        self._draw_centered_text(title, (160, 15))
        self.pg_screen.blit(self.mainmenu, (64, 20), (0, 42, 196, 42))

        # Body: the list itself, or a failure message on a non-OK status.
        if self.highscore_page_idx == 0:
            if self.network.get_global_highscores().status_code == 200:
                self._draw_global_highscores()
            elif self.network.get_global_highscores().status_code != 0:
                self._draw_failed_highscore_fetch("global")
        elif self.highscore_page_idx == 1:
            if self.network.get_local_highscores().status_code == 200:
                self._draw_personal_highscores()
            elif self.network.get_local_highscores().status_code != 0:
                self._draw_failed_highscore_fetch("personal", "not logged in!")

        # Tell the user the other page exists and how to reach it.
        self._draw_centered_text("Click highscores page for", (190, 430))
        other = "PERSONAL" if self.highscore_page_idx == 0 else "GLOBAL"
        self._draw_centered_text(f"{other} highscores", (190, 460))

        self.pg_screen.blit(self.buttons, (0, 415), (64, 64, 64, 64))

    def _draw_failed_highscore_fetch(self, which, second=None):
        """Show a two-line failure message; `second` adds an optional extra line."""
        self._draw_centered_text("failed to get", (160, 85))
        self._draw_centered_text(f"{which} highscores!", (160, 110))
        if second:
            self._draw_centered_text(second, (160, 240))

    def _draw_global_highscores(self):
        """Draw up to five global highscores for the current page, plus pagination."""
        # Column headers in grey.
        header_color = (150, 150, 150)
        self._draw_left_align_text("Username", (10, 70), header_color)
        self._draw_left_align_text("Score", (150, 70), header_color)
        self._draw_left_align_text("Time", (240, 70), header_color)

        highscores = self.network.get_global_highscores().data
        start = 5 * self.global_highscores_page
        remaining = len(highscores) - start
        # At most five rows fit on one page.
        for row in range(min(5, remaining)):
            entry = highscores[start + row]
            y = 100 + row * 45
            self._draw_left_align_text(entry["user"], (10, y + 10))
            self._draw_numbers(entry["score"], (150, y))
            self._draw_numbers(entry["time"] // 1000, (240, y))  # ms -> s

        self._draw_pagination(len(highscores), self.global_highscores_page)

    def _draw_personal_highscores(self):
        """Draw up to five of the player's own highscores, plus pagination."""
        # Column headers in grey.
        header_color = (150, 150, 150)
        self._draw_left_align_text("Score", (10, 70), header_color)
        self._draw_left_align_text("Time", (160, 70), header_color)

        highscores = self.network.get_local_highscores().data
        start = 5 * self.personal_highscores_page
        remaining = len(highscores) - start
        # At most five rows fit on one page.
        for row in range(min(5, remaining)):
            entry = highscores[start + row]
            y = 100 + row * 45
            self._draw_numbers(entry["score"], (10, y))
            self._draw_numbers(entry["time"] // 1000, (160, y))  # ms -> s

        self._draw_pagination(len(highscores), self.personal_highscores_page)

    def _draw_pagination(self, highscores_length, highscore_page):
        """Draw the prev/next arrow buttons and the 'page x / y' label."""
        self.pg_screen.blit(self.buttons, (32, 330), (64, 64, 64, 64))   # prev arrow
        self.pg_screen.blit(self.buttons, (224, 330), (0, 64, 64, 64))   # next arrow
        if highscores_length > 0:
            num_pages = highscores_length // 5 + 1
        else:
            num_pages = 0
        self._draw_centered_text(f"page {highscore_page + 1} / {num_pages}", (160, 362))

    def _draw_numbers(self, numbers, left_top):
        """Blit `numbers` digit-by-digit from the digit sprite sheet.

        Each glyph is 20x32 px, laid out left-to-right by digit value.
        """
        x, y = left_top
        for position, digit in enumerate(str(numbers)):
            source_x = 20 * int(digit)
            self.pg_screen.blit(self.numbers, (x + position * 20, y),
                                (source_x, 0, 20, 32))

    def _draw_centered_text(self, text, center_coord, color=(0, 0, 0)):
        """Render `text` and blit it centred on `center_coord`."""
        surface = self.font.render(text, True, color)
        rect = surface.get_rect()
        rect.center = center_coord
        self.pg_screen.blit(surface, rect)

    def _draw_left_align_text(self, text, top_left, color=(0, 0, 0)):
        """Render `text` and blit it with its top-left corner at `top_left`."""
        surface = self.font.render(text, True, color)
        rect = surface.get_rect()
        rect.topleft = top_left
        self.pg_screen.blit(surface, rect)

    def mouse_down(self, pos):
        """Handle a click: back button, pagination arrows, or page toggle."""
        # Back button (bottom-left): re-arm the fetch flags and leave the screen.
        if self.pos_between(pos, (0, 415), (64, 479)):
            self.highscore_page_idx = 0
            self.fetch_global_highscores = True
            self.fetch_personal_highscores = True
            return {"screen": "main_menu", "play_sound": "click"}

        # Pagination arrows consume the click when they handled it.
        pagination_result = self._check_highscore_pagination(pos)
        if pagination_result:
            return pagination_result

        # Anywhere else: toggle between the global (0) and personal (1) views.
        self.highscore_page_idx = (self.highscore_page_idx + 1) % 2
        return {"play_sound": "click"}

    def _check_highscore_pagination(self, pos):
        """Handle clicks on the prev/next pagination arrows.

        Returns an event dict when the click hit an arrow; otherwise falls
        through (returning None) so the caller treats the click as unhandled.
        """
        # check which highscore page is shown, decrement page if possible
        if self.pos_between(pos, (32, 330), (96, 394)):
            if self.highscore_page_idx == 0:
                if self.global_highscores_page > 0:
                    self.global_highscores_page -= 1
                return {"play_sound": "click"}
            elif self.highscore_page_idx == 1:
                if self.personal_highscores_page > 0:
                    self.personal_highscores_page -= 1
                return {"play_sound": "click"}
        # check which highscore page, valid network response and can increment
        elif self.pos_between(pos, (224, 330), (288, 394)):
            if self.highscore_page_idx == 0:
                if self.network.get_global_highscores().status_code == 200:
                    length = len(self.network.get_global_highscores().data)
                    # Same page arithmetic as _draw_pagination.
                    num_pages = length // 5 + 1 if length > 0 else 0
                    if self.global_highscores_page < num_pages - 1:
                        self.global_highscores_page += 1
                return {"play_sound": "click"}
            elif self.highscore_page_idx == 1:
                if self.network.get_local_highscores().status_code == 200:
                    length = len(self.network.get_local_highscores().data)
                    # Same page arithmetic as _draw_pagination.
                    num_pages = length // 5 + 1 if length > 0 else 0
                    if self.personal_highscores_page < num_pages - 1:
                        self.personal_highscores_page += 1
                return {"play_sound": "click"}