Example #1
class Actor_Critic():
    def __init__(self, env, sess):
        self.env = env
        self.sess = sess
        self.memory_buffer = Replay_Buffer(BUFFER_SIZE, BATCH_SIZE)
        self.learning_rate = LR
        self.tau = TAU
        self.buffer_size = BUFFER_SIZE
        self.batch_size = BATCH_SIZE
        self.discount = 0.99
        self.Actor = Actor(self.env, self.sess, self.learning_rate, self.tau,
                           self.discount)
        self.Critic = Critic(self.env, self.sess, self.learning_rate, self.tau,
                             self.discount)

    def update_target(self):
        self.Actor.actor_target_update()
        self.Critic.critic_target_update()

    #def train(self):

    def save(self, prefixe):
        self.Actor.save(prefixe)
        self.Critic.save(prefixe)
        self.memory_buffer.save()
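The train() method above is stubbed out. A minimal sketch of what it might contain, assuming Replay_Buffer exposes a sample() method and the Actor/Critic wrappers expose predict/target-predict/train helpers (all assumed names, not confirmed by the snippet):

    def train(self):
        # Skip until the buffer holds at least one batch (len() support assumed).
        if len(self.memory_buffer) < self.batch_size:
            return
        states, actions, rewards, next_states, dones = self.memory_buffer.sample()
        # DDPG-style TD target: r + gamma * Q'(s', mu'(s')) for non-terminal steps.
        next_actions = self.Actor.target_predict(next_states)
        target_q = self.Critic.target_predict(next_states, next_actions)
        y = rewards + self.discount * target_q * (1 - dones)
        self.Critic.train(states, actions, y)
        # Move the actor along the critic's action-gradient, then soft-update targets.
        gradients = self.Critic.action_gradients(states, self.Actor.predict(states))
        self.Actor.train(states, gradients)
        self.update_target()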
Example #2
    def __init__(self, state_dim, action_dim, max_action, discount, tau,
                 policy_noise, noise_clip, policy_freq, device):

        self.state_dim = len(state_dim[0])
        self.action_dim = len(action_dim)
        self.max_action = max_action[2]
        self.actor = Actor(self.state_dim, self.action_dim,
                           self.max_action).to(device)
        self.actor_target = copy.deepcopy(self.actor).float()
        # self.actor_target = Actor(state_dim, action_dim, self.max_action).to(device)
        # self.actor_target.load_state_dict(self.actor.state_dict())
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),
                                                lr=3e-4)  # or 1e-3

        self.critic = Critic(self.state_dim, self.action_dim).to(device)
        self.critic_target = copy.deepcopy(self.critic).float()
        # self.critic_target = Critic(state_dim, action_dim).to(device)
        # self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),
                                                 lr=3e-4)  # or 1e-2

        self.device = device
        self.discount = discount
        self.tau = tau
        self.policy_noise = policy_noise
        self.noise_clip = noise_clip
        self.policy_freq = policy_freq

        self.total_it = 0
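For context, the policy_noise / noise_clip / policy_freq fields above follow the standard TD3 recipe. A sketch of the train() step they usually drive, assuming the usual import torch.nn.functional as F, a replay buffer with a sample() method, and the twin-Q critic interface (Q1 is an assumed helper):

    def train(self, replay_buffer, batch_size=256):
        self.total_it += 1
        state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)

        with torch.no_grad():
            # Target policy smoothing: add clipped noise to the target action.
            noise = (torch.randn_like(action) * self.policy_noise).clamp(
                -self.noise_clip, self.noise_clip)
            next_action = (self.actor_target(next_state) + noise).clamp(
                -self.max_action, self.max_action)
            # Clipped double-Q learning: bootstrap from the smaller of the twin critics.
            target_q1, target_q2 = self.critic_target(next_state, next_action)
            target_q = reward + not_done * self.discount * torch.min(target_q1, target_q2)

        current_q1, current_q2 = self.critic(state, action)
        critic_loss = F.mse_loss(current_q1, target_q) + F.mse_loss(current_q2, target_q)
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()

        # Delayed policy update: refresh the actor and targets every policy_freq steps.
        if self.total_it % self.policy_freq == 0:
            actor_loss = -self.critic.Q1(state, self.actor(state)).mean()  # Q1 assumed
            self.actor_optimizer.zero_grad()
            actor_loss.backward()
            self.actor_optimizer.step()
            for p, tp in zip(self.critic.parameters(), self.critic_target.parameters()):
                tp.data.copy_(self.tau * p.data + (1 - self.tau) * tp.data)
            for p, tp in zip(self.actor.parameters(), self.actor_target.parameters()):
                tp.data.copy_(self.tau * p.data + (1 - self.tau) * tp.data)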
Example #3
    def __init__(self,
                 state_size,
                 action_size,
                 param={},
                 level_name='general'):
        self.seed = 714
        np.random.seed(seed=self.seed)
        self.state_size = state_size
        self.action_size = action_size
        self.dummy_adv = np.zeros((1, 1))
        self.dummy_actions_prob = np.zeros((1, action_size))
        self.actor = Actor(state_size, action_size)
        self.critic = Critic(state_size, action_size)
        self.level_name = level_name
        timestamp = datetime.datetime.now().strftime("%Y_%m_%d_%H%M")
        self.writer = SummaryWriter('logs/%s/%s' %
                                    (self.level_name, timestamp))
        self.best_weight_fn = 'ppo_best_%s_' + level_name.lower().replace(
            '.', '_') + '.h5'
        self.memory = [[], [], [], []]
        self.update_count = 0
        self.cur_ind = 0
        self.GAMMA = 0.99

        self.EXPERIENCE_REPLAY = param.get('EXPERIENCE_REPLAY', False)
        if self.EXPERIENCE_REPLAY is True:
            self.BUFFER_SIZE = param['BUFFER_SIZE']
            self.BATCH_SIZE = param['BATCH_SIZE']
            self.buffer = ReplayBuffer(self.action_size, self.BUFFER_SIZE,
                                       self.BATCH_SIZE, self.seed)
Example #4
    def init_game(self):
        # self.gamemap.load_map_from_json('res/map/test_map.json')
        self.gamemap.create_default_terrain()

        for i in range(3, 6):
            self.actors.append(Actor('soldier', 's', 0, sprite=0xE100, color=TEAM_COLORS[0], x=i, y=1, movement=1,
                                     stats=Stats(3, 3, 1)))
        for i in range(0, 10, 2):
            self.actors.append(Actor('barbarian', 'b', 1, sprite=0xE101, color=TEAM_COLORS[1], x=i, y=8, movement=2,
                                     stats=Stats(3, 2, 0)))

        self.actors.append(Actor('king', 'K', 0, sprite=0xE102, color=TEAM_COLORS[0], x=4, y=0,
                                 movement=2, stats=Stats(5, 3, 4)))
        self.actors.append(Actor('leader', 'L', 1, sprite=0xE103, color=TEAM_COLORS[1], x=4, y=9,
                                 movement=2, stats=Stats(7, 4, 2)))

        self.actors.append(Actor('Xander', 'S', 2, sprite=0xE104, color=TEAM_COLORS[2], x=5, y=5,
                                 movement=10, stats=Stats(7, 40, 2)))

        self.turn_to_take = self.actors.copy()
        self.turn_to_take.sort(key=lambda x: x.stats.mod['agility'], reverse=True)
        self.unit_turn = self.turn_to_take.pop(0)
        self.unit_turn.new_turn()
        self.game_state = 'new_turn'
Example #5
class Agent:
    def __init__(self, state_size, action_size):
        self.actor = Actor(state_size, action_size)
        self.critic = Critic(state_size, action_size)
        self.state_batch = []
        self.action_batch = []
        self.next_state_batch = []
        self.reward_batch = []
        self.done_batch = []

    def choose_action(self, state):
        state = np.array([state])
        return self.actor.action(state)[0][0]

    def store(self, state, action, next_state, reward, done):
        self.state_batch.append(state)
        self.action_batch.append(action)
        self.next_state_batch.append(next_state)
        self.reward_batch.append(reward)
        self.done_batch.append(done)

    def train(self):
        state_batch = np.vstack(self.state_batch)
        action_batch = np.vstack(self.action_batch)
        next_state_batch = np.vstack(self.next_state_batch)
        reward_batch = np.vstack(self.reward_batch)
        done_batch = np.vstack(self.done_batch)
        next_action_batch = self.actor.action(next_state_batch)

        self.state_batch = []
        self.action_batch = []
        self.next_state_batch = []
        self.reward_batch = []
        self.done_batch = []
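Note that train() above assembles the batches, computes next_action_batch, and then clears everything without performing an update. A sketch of one TD-style step that could sit in between; critic.q, critic.train and actor.train are assumed interfaces, not shown above:

        # Hypothetical update placed before the buffers are cleared:
        target_q = self.critic.q(next_state_batch, next_action_batch)
        td_targets = reward_batch + 0.99 * target_q * (1 - done_batch)  # gamma assumed 0.99
        self.critic.train(state_batch, action_batch, td_targets)
        self.actor.train(state_batch, self.critic)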
Example #6
	def __init__(self):
		
		# init class members once
		if Player.spriteset is None:
			Player.spriteset = Spriteset.fromfile("hero")
			Player.seq_idle = Sequence.create_sprite_sequence(Player.spriteset, "idle", 4)
			Player.seq_jump = Sequence.create_sprite_sequence(Player.spriteset, "jump", 24)
			Player.seq_run = Sequence.create_sprite_sequence(Player.spriteset, "run", 5)
			Player.spriteset_death = Spriteset.fromfile("effect_death")
			Player.seq_death = Sequence.create_sprite_sequence(Player.spriteset_death, "death-", 5)

		Actor.__init__(self, None, 60, 188)
		self.state = State.Undefined
		self.direction = Direction.Right
		self.xspeed = 0
		self.yspeed = 0
		self.set_idle()
		self.sprite.set_position(self.x, self.y)
		self.width = self.size[0]
		self.height = self.size[1]
		self.medium = Medium.Floor
		self.jump = False
		self.immunity = 0
		self.rectangle = Rectangle(0, 0, self.width, self.height)


		self.palettes = (self.spriteset.palette, Palette.fromfile("hero_alt.act"))
Example #7
 def __init__(self, screen, position, velocity, delay=1):
     Actor.__init__(self, screen, position, 10, velocity)
     self.image = pygame.image.load("images/fly.png").convert_alpha()
     self.image_w, self.image_h = self.image.get_size()
     self.delay = delay
     self.acc_time = 0
     self.alive = True
Example #8
 def agregar_actor(self):
     print("Add")
     actor = Actor()
     actor.ingresar()
     self.repository.agregar_actor(actor.key, actor)
     print(actor.nombre + " " + actor.apellido +
           " successfully added to the repository.")
Example #9
    def __init__(self, state_size, action_size, random_seed, hyperparams):
        self.state_size = state_size
        self.action_size = action_size
        random.seed(random_seed)
        self.seed = random_seed
        self.hyperparams = hyperparams

        self.actor = Actor(state_size, action_size, random_seed).to(device)
        self.actor_noise = Actor(state_size, action_size,
                                 random_seed).to(device)
        self.actor_target = Actor(state_size, action_size,
                                  random_seed).to(device)
        self.actor_optim = optim.Adam(self.actor.parameters(),
                                      lr=hyperparams.alpha_actor)

        self.critic = Critic(state_size, action_size, random_seed).to(device)
        self.critic_target = Critic(state_size, action_size,
                                    random_seed).to(device)
        self.critic_optim = optim.Adam(
            self.critic.parameters(),
            lr=hyperparams.alpha_critic,
            weight_decay=hyperparams.weight_decay,
        )

        self.replay_buffer = ReplayBuffer(hyperparams.buffer_size,
                                          hyperparams.batch_size, random_seed)

        self.noise = OUNoise(
            action_size,
            random_seed,
            self.hyperparams.mu,
            self.hyperparams.theta,
            self.hyperparams.sigma,
        )
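The tau hyperparameter stored above is typically consumed by a Polyak soft update between the local and target networks; a generic PyTorch sketch, not part of the snippet:

def soft_update(local_model, target_model, tau):
    # theta_target <- tau * theta_local + (1 - tau) * theta_target
    for target_param, local_param in zip(target_model.parameters(),
                                         local_model.parameters()):
        target_param.data.copy_(tau * local_param.data +
                                (1.0 - tau) * target_param.data)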
Example #10
    def __init__(self, state_size=24, action_size=2, random_seed=0):
        """
        Initializes Agent object.
        @Param:
        1. state_size: dimension of each state.
        2. action_size: number of actions.
        """
        self.state_size = state_size
        self.action_size = action_size
        random.seed(random_seed)
        self.seed = random_seed

        #Actor network
        self.actor_local = Actor(self.state_size, self.action_size,
                                 random_seed).to(device)
        self.actor_target = Actor(self.state_size, self.action_size,
                                  random_seed).to(device)
        self.actor_optimizer = optim.Adam(self.actor_local.parameters(),
                                          lr=LR_ACTOR)

        #Critic network
        self.critic_local = Critic(self.state_size, self.action_size,
                                   random_seed).to(device)
        self.critic_target = Critic(self.state_size, self.action_size,
                                    random_seed).to(device)
        self.critic_optimizer = optim.Adam(self.critic_local.parameters(),
                                           lr=LR_CRITIC)

        #Noise process
        self.noise = OUNoise(action_size,
                             random_seed)  #define Ornstein-Uhlenbeck process

        #Replay memory
        self.memory = ReplayBuffer(
            self.action_size, BUFFER_SIZE, MINI_BATCH,
            random_seed)  #define experience replay buffer object
Example #11
    def __init__(self,
                 texto="None",
                 x=0,
                 y=0,
                 magnitud=20,
                 vertical=False,
                 fuente=None,
                 fijo=True):
        """Inicializa el actor.

        :param texto: Texto a mostrar.
        :param x: Posición horizontal.
        :param y: Posición vertical.
        :param magnitud: Tamaño del texto.
        :param vertical: Si el texto será vertical u horizontal, como True o False.
        :param fuente: Nombre de la fuente a utilizar.
        :param fijo: Determina si el texto se queda fijo aunque se mueva la camara. Por defecto está fijo.
        """
        self.__magnitud = magnitud
        self.__vertical = vertical
        self.__fuente = fuente
        self.__color = pilas.colores.blanco
        Actor.__init__(self, x=x, y=y)
        self.centro = ("centro", "centro")
        self.fijo = fijo
        self.texto = texto
Example #12
        def onObjectClick(self, event):
                """function to get movie details on the movie when clicked."""

                tempMovie = Movie(2, 4, "132")
                tempMovie = tempMovie.getMovieByOvalId(event.widget.find_closest(event.x, event.y)[0], self.moviesList)

                # get id's of the artists starring in this movie
                artistmovierelation = ArtistMovieRelation(0,0)
                artistStarring = artistmovierelation.getArtistsByMovieId(tempMovie.getMovieId(), self.relationsList)

                tempActor = Actor("","")
                tempActress = Actress("","")

                # fetches the name of the actor or actress and finds out whether it originated from the Actor or Actress class.

                artistsStartingNames = []
                for artistId in artistStarring:
                        actor = tempActor.getArtistByid(artistId, self.actorsList)
                        if actor is not None:
                                artistsStartingNames.append(actor.getArtistName())
                        else:
                                actress = tempActress.getArtistByid(artistId, self.actressesList)
                                if actress is not None:
                                        artistsStartingNames.append(actress.getArtistName())

                #  labels to show the film details
                self.movieDetailsVar.set('Title of the Film   : ' + tempMovie.getMovieTitle() + "\n" + "\n" 
                                         "Year Film Released : " + tempMovie.getMovieYear() + "\n"+ "\n" 
                                         "Actor/Actress Name  : " + ", ".join(artistsStartingNames))
Example #13
def main(config, max_samples):
    get_env_configs(config)
    ray.init()

    parameter_server = ParameterServer.remote(config)
    replay_buffer = ReplayBuffer.remote(config)
    learner = Learner.remote(config, replay_buffer, parameter_server)

    train_actor_ids = []
    eval_actor_ids = []

    learner.start_learning.remote()

    #   start train actors
    for i in range(config["num_workers"]):
        epsilon = config["max_eps"] * i / config["num_workers"]
        training_actor = Actor.remote("train-" + str(i), replay_buffer,
                                      parameter_server, config, epsilon)
        training_actor.sample.remote()
        train_actor_ids.append(training_actor)

    #   start eval actors
    for i in range(config["eval_num_workers"]):
        epsilon = 0
        eval_actor = Actor.remote("eval-" + str(i),
                                  replay_buffer,
                                  parameter_server,
                                  config,
                                  epsilon,
                                  eval=True)
        eval_actor_ids.append(eval_actor)

    #   fetch samples in loop and sync actor weights
    total_samples = 0
    best_eval_mean_reward = -np.inf
    eval_mean_rewards = []
    while total_samples < max_samples:
        total_env_samples_id = replay_buffer.get_total_env_samples.remote()
        new_total_samples = ray.get(total_env_samples_id)
        num_new_samples = new_total_samples - total_samples
        if num_new_samples >= config["timesteps_per_iteration"]:
            total_samples = new_total_samples
            print("Total samples:", total_samples)
            parameter_server.set_eval_weights.remote()
            eval_sampling_ids = [
                eval_actor.sample.remote() for eval_actor in eval_actor_ids
            ]
            eval_rewards = ray.get(eval_sampling_ids)
            print("Evaluation rewards: {}".format(eval_rewards))
            eval_mean_reward = np.mean(eval_rewards)
            eval_mean_rewards.append(eval_mean_reward)
            print("Mean evaluation reward: {}".format(eval_mean_reward))
            if eval_mean_reward > best_eval_mean_reward:
                print("Model has improved! Saving the model!")
                best_eval_mean_reward = eval_mean_reward
                parameter_server.save_eval_weights.remote()

    print("Finishing the training.\n\n\n\n\n\n")
    [actor.stop.remote() for actor in train_actor_ids]
    learner.stop.remote()
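A hypothetical way to launch the loop above; the config keys mirror the ones main() reads, and every value here is illustrative only:

if __name__ == "__main__":
    config = {
        "num_workers": 4,
        "eval_num_workers": 2,
        "max_eps": 0.5,                    # illustrative exploration ceiling
        "timesteps_per_iteration": 10000,  # illustrative evaluation cadence
    }
    main(config, max_samples=1_000_000)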
Example #14
 def test_get_damage_returns_modifier_with_dice(self):
     fighter = Fighter(5)
     rogue = Rogue(16)
     strength = Attribute(Attribute.STRENGTH, 16)
     dexterity = Attribute(Attribute.DEXTERITY, 14)
     actor = Actor('Rogue Fighter', [strength, dexterity], [rogue, fighter])
     self.assertEqual('1d3+3', actor.get_attack_damage('base_attack'))
Example #15
    def add_movie(self, movie_name, actors):
        # find whether the movie name exists in the movie list
        target_movie = next(
            (x for x in self.__movie_list if x.get_movie_name() == movie_name),
            None)
        if target_movie is None:
            target_movie = Movie(movie_name)
            self.__movie_list.append(target_movie)

        movie_actors = target_movie.get_actors()
        for actor in actors:
            # find whether the actor name exists in the actor list
            target_actor = next(
                (x for x in self.__actor_list if x.get_actor_name() == actor),
                None)
            if target_actor is None:
                target_actor = Actor(actor)
                self.__actor_list.append(target_actor)

            # add the new movie to target_actor
            new_movie_list = target_actor.get_movies()
            new_movie_list.append(target_movie)
            target_actor.set_movies(new_movie_list)

            # add the new actor to target_movie's pending list
            movie_actors.append(target_actor)

        target_movie.set_actors(movie_actors)
Example #16
    def _run_remote_tasks(self, signal_queue):
        # The remote actor will actually run on the local machine or on other machines of the xparl cluster
        remote_actor = Actor(self.game, self.args)

        while True:
            # receive running task signal
            # signal: specify task type and task input data (optional)
            signal = signal_queue.get()

            if signal["task"] == "self-play":
                episode_num_each_actor = self.args.numEps // self.args.actors_num
                result = remote_actor.self_play(
                    self.current_agent.get_weights(), episode_num_each_actor)
                self.remote_actors_return_queue.put({"self-play": result})

            elif signal["task"] == "pitting":
                games_num_each_actor = self.args.arenaCompare // self.args.actors_num
                result = remote_actor.pitting(
                    self.previous_agent.get_weights(),
                    self.current_agent.get_weights(), games_num_each_actor)
                self.remote_actors_return_queue.put({"pitting": result})

            elif signal["task"] == "evaluate_test_dataset":
                test_dataset = signal["test_dataset"]
                result = remote_actor.evaluate_test_dataset(
                    self.current_agent.get_weights(), test_dataset)
                self.remote_actors_return_queue.put(
                    {"evaluate_test_dataset": result})
            else:
                raise NotImplementedError
Example #17
 def test_get_full_attack_returns_list_of_modifiers(self):
     fighter = Fighter(2)
     rogue = Rogue(19)
     actor = Actor('Rogue Fighter', [], [rogue, fighter])
     full_attack = actor.get_full_attack()
     self.assertEqual(4, len(full_attack))
     self.assertEqual(16, full_attack[0].value)
     self.assertEqual(
         '+14 from level 19 Rogue. +2 from level 2 Fighter. +0, Strength ability score of 10. ',
         full_attack[0].audit_explanation
     )
     self.assertEqual(full_attack[1].value, 11)
     self.assertEqual(
         'Additional attack split from base BAB at a +5 breakpoint. See first attack for full audit trail.',
         full_attack[1].audit_explanation
     )
     self.assertEqual(full_attack[2].value, 6)
     self.assertEqual(
         'Additional attack split from base BAB at a +5 breakpoint. See first attack for full audit trail.',
         full_attack[2].audit_explanation
     )
     self.assertEqual(full_attack[3].value, 1)
     self.assertEqual(
         'Additional attack split from base BAB at a +5 breakpoint. See first attack for full audit trail.',
         full_attack[3].audit_explanation
     )
Example #18
def main():
	
	screen = pygame.display.set_mode((WIDTH, HEIGHT))
	pygame.display.set_caption("Engine RPG")

	clock = pygame.time.Clock()
	#rejilla = load_image('resources/graphics/rejilla.png', True)
	
	map_loaded = Map("pruebas.tmx")
	heroe = Actor(map_loaded)
	camara = Camera(map_loaded, heroe)
	inp = Input()
	
	while True:
		time = clock.tick(40)
		inp.update()
		salir(inp.get_key_list())
		
		id = heroe.mover(map_loaded, inp)
		heroe.update(id)
		camara.update(screen, map_loaded, heroe)
		camara.show_fps(screen, clock.get_fps())
		#screen.blit(rejilla, (0, 0))
		
		pygame.display.flip()
	return 0
Example #19
def test_movie():
    # check_boolean_equality_function
    movie = Movie("Moana", 2009)
    print(movie)

    movie3 = Movie("Moana", 2010)
    print(movie3)

    movie2 = Movie("Inception", 2010)
    print(movie2)

    print(movie > movie2)
    print(movie < movie3)
    print(movie3 == movie3)

    # check_remove_actor_in_list_of_actors
    actors = [Actor("Auli'i Cravalho"), Actor("Dwayne Johnson"), Actor("Rachel House"), Actor("Temuera Morrison")]
    for actor in actors:
        movie.add_actor(actor)
    movie.remove_actor(Actor("Auli'i Cravalho"))
    print(movie.actors)

    # check_for_out_of_range_runtime
    movie.runtime_minutes = 121
    print("Movie runtime: {} minutes".format(movie.runtime_minutes))

    movie.external_rating = 30
    print("votes: {}".format(movie.external_rating))
Example #20
    def __init__(self, critic_mode: str, actor_discount_factor: float,
                 e_greedy_rate: float, actor_learning_rate: float,
                 e_greedy_decay_rate: float, critic_learning_rate: float,
                 critic_discount_factor, actor_eligibility_decay_rate: float,
                 critic_eligibility_decay_rate: float, nn_dimensions,
                 state_size, model_fp, pretrained: bool):

        self.actor = Actor(
            discount_factor=actor_discount_factor,
            e_greedy_rate=e_greedy_rate,
            lr=actor_learning_rate,
            e_greedy_decay_rate=e_greedy_decay_rate,
            actor_eligibility_decay_rate=actor_eligibility_decay_rate,
        )

        if critic_mode == "nn":
            self.critic = CriticNN(
                method=critic_mode,
                lr=critic_learning_rate,
                critic_eligibility_decay_rate=critic_eligibility_decay_rate,
                discount_factor=critic_discount_factor,
                nn_dimensions=nn_dimensions,
                input_size=state_size,
                model_fp=model_fp,
                pretrained=pretrained)

        elif critic_mode == "table":
            self.critic = CriticTable(
                method=critic_mode,
                lr=critic_learning_rate,
                critic_eligibility_decay_rate=critic_eligibility_decay_rate,
                discount_factor=critic_discount_factor,
                input_size=state_size)
        else:
            raise ValueError(f"unknown critic_mode: {critic_mode!r}")
Example #21
    def __init__(self, task):
        self.task = task
        self.state_size = task.state_size
        self.action_size = task.action_size
        self.action_low = task.action_low
        self.action_high = task.action_high

        # Actor (Policy) Model
        self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
        self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)

        # Critic (Value) Model
        self.critic_local = Critic(self.state_size, self.action_size)
        self.critic_target = Critic(self.state_size, self.action_size)

        # Initialize target model parameters with local model parameters
        self.critic_target.model.set_weights(self.critic_local.model.get_weights())
        self.actor_target.model.set_weights(self.actor_local.model.get_weights())

        # Noise process
        self.exploration_mu = 0
        self.exploration_theta = 0.15
        self.exploration_sigma = 0.2
        self.noise = OUNoise(self.action_size, self.exploration_mu, self.exploration_theta, self.exploration_sigma)

        # Replay memory
        self.buffer_size = 100000
        self.batch_size = 64
        self.memory = ReplayBuffer(self.buffer_size, self.batch_size)

        # Algorithm parameters
        self.gamma = 0.99  # discount factor
        self.tau = 0.01  # for soft update of target parameters
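The exploration_mu/theta/sigma values above parameterize an Ornstein-Uhlenbeck process; the OUNoise class itself is not shown, but a typical implementation looks like this (a sketch, not the snippet's own code):

import numpy as np

class OUNoise:
    """Ornstein-Uhlenbeck process: temporally correlated exploration noise."""
    def __init__(self, size, mu=0.0, theta=0.15, sigma=0.2):
        self.mu = mu * np.ones(size)
        self.theta = theta
        self.sigma = sigma
        self.reset()

    def reset(self):
        # Return the internal state to the mean.
        self.state = self.mu.copy()

    def sample(self):
        # dx = theta * (mu - x) + sigma * N(0, 1)
        dx = self.theta * (self.mu - self.state) + \
             self.sigma * np.random.randn(len(self.state))
        self.state = self.state + dx
        return self.state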
Example #22
    def blame(cls, repo, commit, file):
        """
        The blame information for the given file at the given commit

        Returns
            list: [git.Commit, list: [<line>]]
            A list of tuples associating a Commit object with a list of lines that 
            changed within the given commit. The Commit objects will be given in order
            of appearance.
        """
        data = repo.git.blame(commit, '--', file, p=True)
        commits = {}
        blames = []
        info = None

        for line in data.splitlines():
            parts = re.split(r'\s+', line, maxsplit=1)
            if re.search(r'^[0-9A-Fa-f]{40}$', parts[0]):
                if re.search(r'^([0-9A-Fa-f]{40}) (\d+) (\d+) (\d+)$', line):
                    m = re.search(r'^([0-9A-Fa-f]{40}) (\d+) (\d+) (\d+)$', line)
                    id, origin_line, final_line, group_lines = m.groups()
                    info = {'id': id}
                    blames.append([None, []])
                elif re.search(r'^([0-9A-Fa-f]{40}) (\d+) (\d+)$', line):
                    m = re.search(r'^([0-9A-Fa-f]{40}) (\d+) (\d+)$', line)
                    id, origin_line, final_line = m.groups()
                    info = {'id': id}
            elif re.search(r'^(author|committer)', parts[0]):
                if re.search(r'^(.+)-mail$', parts[0]):
                    m = re.search(r'^(.+)-mail$', parts[0])
                    info["%s_email" % m.groups()[0]] = parts[-1]
                elif re.search(r'^(.+)-time$', parts[0]):
                    m = re.search(r'^(.+)-time$', parts[0])
                    info["%s_date" % m.groups()[0]] = time.gmtime(int(parts[-1]))
                elif re.search(r'^(author|committer)$', parts[0]):
                    m = re.search(r'^(author|committer)$', parts[0])
                    info[m.groups()[0]] = parts[-1]
            elif re.search(r'^filename', parts[0]):
                info['filename'] = parts[-1]
            elif re.search(r'^summary', parts[0]):
                info['summary'] = parts[-1]
            elif parts[0] == '':
                if info:
                    c = commits.get(info['id'])
                    if not c:
                        c = Commit(repo, id=info['id'],
                                         author=Actor.from_string(info['author'] + ' ' + info['author_email']),
                                         authored_date=info['author_date'],
                                         committer=Actor.from_string(info['committer'] + ' ' + info['committer_email']),
                                         committed_date=info['committer_date'],
                                         message=info['summary'])
                        commits[info['id']] = c

                    m = re.search(r'^\t(.*)$', line)
                    text, = m.groups()
                    blames[-1][0] = c
                    blames[-1][1].append(text)
                    info = None

        return blames
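A hypothetical call site for the classmethod above, assuming a GitPython-style Repo object (the repository path and file name are illustrative only):

from git import Repo

repo = Repo("/path/to/some/repo")  # hypothetical repository path
for commit, lines in Commit.blame(repo, "HEAD", "README.md"):
    print(commit.author, "touched", len(lines), "line(s)")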
Example #23
def initialize_data():
    """
    initialize database using actor and movie data from scraped json files
    """
    actor_data = json.load(open("actor.json"))
    movie_data = json.load(open("movie.json"))
    movie_objects = {}
    actor_objects = {}
    #logger.info('load data from json and prepare to construct data structure')

    for movie in movie_data:
        new_movie = Movie(movie["movieName"], movie["movieYear"], normalize_grossing(movie["movieGrossing"]), [])
        movie_objects[new_movie.name] = new_movie
        for actor in movie["movieStaring"]:

            for available_actor in actor_data:
                if available_actor["actorName"] == actor:

                    if available_actor["actorName"] not in actor_objects:
                        actor_objects[available_actor["actorName"]] = Actor(available_actor["actorName"], normalize_age(available_actor["actorAge"]), [], 0)
                    actor_objects[available_actor["actorName"]].act_movie.append(new_movie)
                    if new_movie.grossing is not None:
                        actor_objects[available_actor["actorName"]].total_grossing += new_movie.grossing

                    new_movie.attend_actor.append(actor_objects[available_actor["actorName"]])
                    break

    for actor in actor_data:
        if actor["actorName"] not in actor_objects:
            actor_objects[actor["actorName"]] = Actor(actor["actorName"], normalize_age(actor["actorAge"]), [], 0)

    return movie_objects, actor_objects
Example #24
    def __init__(self, state_size, action_size):
        """
        Initializes Agent object.
        @Param:
        1. state_size: dimension of each state.
        2. action_size: number of actions.
        """
        self.state_size = state_size
        self.action_size = action_size
        
        #Actor network
        self.actor_local = Actor(self.state_size, self.action_size).to(device) #local model
        self.actor_target = Actor(self.state_size, self.action_size).to(device) #target model, TD-target
        self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR) #initialize optimizer for the Actor network using Adam

        #Critic network
        self.critic_local = Critic(self.state_size, self.action_size).to(device) #local model
        self.critic_target = Critic(self.state_size, self.action_size).to(device) #target model, TD-target
        self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY) #initialize optimizer for the Critic network using Adam

        #Noise process
        self.noise = OUNoise(action_size) #define Ornstein-Uhlenbeck process

        #Replay memory
        self.memory = ReplayBuffer(self.action_size, BUFFER_SIZE, MINI_BATCH) #define experience replay buffer object
Example #25
    def __init__(self, state_size, batch_size, is_eval=False):
        self.state_size = state_size
        self.action_size = 3  #buy,sell,hold

        #defining replay memory size
        self.buffer_size = 1000000
        self.batch_size = batch_size
        self.memory = ReplayBuffer(self.buffer_size, self.batch_size)
        self.inventory = []

        #define whether or not training is going on
        self.is_eval = is_eval
        #Discount factor
        self.gamma = 0.99
        # soft update for AC model
        self.tau = 0.001

        #instantiate the local and target actor models for soft updates
        self.actor_local = Actor(self.state_size, self.action_size)
        self.actor_target = Actor(self.state_size, self.action_size)

        #critic model mapping state-action pairs with Q-values
        self.critic_local = Critic(self.state_size, self.action_size)

        #instantiate the local and target critic models for soft updates
        self.critic_target = Critic(self.state_size, self.action_size)
        self.critic_target.model.set_weights(
            self.critic_local.model.get_weights())

        #set target model parameter to local model parameters
        self.actor_target.model.set_weights(
            self.actor_local.model.get_weights())
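The trading agent above never shows action selection. A sketch of an act() consistent with its fields, assuming actor_local.model is a Keras model that outputs action probabilities over {buy, sell, hold} (an assumption, not shown above):

    def act(self, state):
        # Probabilities over the three actions from the (assumed) Keras model.
        probabilities = self.actor_local.model.predict(state)[0]
        if self.is_eval:
            return np.argmax(probabilities)  # greedy action at evaluation time
        # Sample stochastically while training to keep exploring.
        return np.random.choice(self.action_size, p=probabilities)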
Example #26
    def __init__(self,
                 env,
                 alpha: float = 1e-3,
                 gamma: float = 0.99,
                 hidden_size: int = 32,
                 tau: float = 1e-3):
        self.env = env
        self.gamma = gamma
        self.alpha = alpha
        self.tau = tau
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        self.actor = Actor(2, hidden_size, 1)
        self.actor_target = deepcopy(self.actor)

        self.critic = Critic(3, hidden_size, 1)
        self.critic_target = deepcopy(self.critic)

        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(),
                                                lr=alpha)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(),
                                                 lr=alpha)

        self.critic.to(self.device)
        self.critic_target.to(self.device)
        self.actor.to(self.device)
        self.actor_target.to(self.device)
Example #27
    def __init__(self,
                 seed,
                 n_state,
                 n_action,
                 batch_size=64,
                 buffer=1e5,
                 gamma=0.99,
                 lr_actor=1e-4,
                 lr_critic=1e-3,
                 weight_decay=0,
                 tau=1e-3):
        self.batch_size = batch_size

        #init actor
        self.local_actor = Actor(n_state, n_action, seed).to(device)
        self.target_actor = Actor(n_state, n_action, seed).to(device)
        self.optim_actor = torch.optim.Adam(self.local_actor.parameters(),
                                            lr=lr_actor)
        #init critic
        self.local_critic = Critic(n_state, n_action, seed).to(device)
        self.target_critic = Critic(n_state, n_action, seed).to(device)
        self.optim_critic = torch.optim.Adam(self.local_critic.parameters(),
                                             lr=lr_critic,
                                             weight_decay=weight_decay)

        #init memory
        self.memory = memory(int(buffer), device, seed)
        self.tau = tau
        self.gamma = gamma
        self.noise = noise(n_action, seed=seed)
Example #28
    def __init__(self, state_size, action_size, params, seed):
        """Initialize a DDPG agent
        
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            params (Params): hyperparameters 
            seed (int): random seed
        """

        self.gamma = params.gamma
        self.tau = params.tau
        np.random.seed(seed)
        self.seed = seed

        # actor networks
        self.actor_local = Actor(state_size, action_size, params.units_actor,
                                 seed).to(device)
        self.actor_target = Actor(state_size, action_size, params.units_actor,
                                  seed).to(device)
        self.actor_optimizer = optim.Adam(self.actor_local.parameters(),
                                          params.lr_actor)

        # critic networks
        self.critic_local = Critic(state_size, action_size,
                                   params.units_critic, seed).to(device)
        self.critic_target = Critic(state_size, action_size,
                                    params.units_critic, seed).to(device)
        self.critic_optimizer = optim.Adam(self.critic_local.parameters(),
                                           params.lr_critic)

        # Noise process
        self.noise = OUNoise(action_size, seed, params.mu, params.theta,
                             params.sigma)
Example #29
    def __init__(self,
                 texto="None",
                 x=0,
                 y=0,
                 magnitud=20,
                 vertical=False,
                 fuente=None,
                 fijo=True,
                 ancho=0):
        """Inicializa el actor.

        :param texto: Texto a mostrar.
        :param x: Posición horizontal.
        :param y: Posición vertical.
        :param magnitud: Tamaño del texto.
        :param vertical: Si el texto será vertical u horizontal, como True o False.
        :param fuente: Nombre de la fuente a utilizar.
        :param fijo: Determina si el texto se queda fijo aunque se mueva la camara. Por defecto está fijo.
        :param ancho: El limite horizontal en pixeles para la cadena, el texto de mostrara en varias lineas si no cabe en este límite.
        """
        self._ancho_del_texto = ancho
        self.__magnitud = magnitud
        self.__vertical = vertical
        self.__fuente = fuente
        self.__color = pilas.colores.blanco
        Actor.__init__(self, x=x, y=y)
        self.centro = ("centro", "centro")
        self.fijo = fijo
        self.texto = texto
Example #30
 def __init__(self, image):
     Actor.__init__(self, image)
     self.direction = random.randrange(-1, 2) * const.ENEMY_SPEED
     if self.direction > 0:
         self.rect.left = const.SCREENRECT.left
     else:
         self.rect.right = const.SCREENRECT.right
Example #31
        def loadActors(self, count=100):
                """function to read actors data and create a relationship between actors and movie."""

                actors = self.readArtists("actor", count)
                self.filePointerActors = self.fileHandleActors.tell()

                objActor = Actor("", 0)
                objMovie = Movie("", "", 0)

                for actor in actors:
                        name, title, year = actor
                        
                        # if the actor does not exist yet, assign it a new ID
                        if objActor.getArtistByName(name, self.actorsList) is None:
                                self.artistCounter += 1
                                actorId = self.artistCounter
                                newActor = Actor(actorId, name)
                                self.actorsList.append(newActor)

                                # search the movie list to check whether this movie has already been seen
                                if not objMovie.doesMovieExist(title, self.moviesList) and \
                                   objMovie.getMovieByName(title, self.moviesList) is None:
                                        self.movieCounter += 1
                                        movieId = self.movieCounter
                                        movie = Movie(movieId, title, year)
                                        self.moviesList.append(movie)

                                        # create a relation between actor and movie
                                        relation = ArtistMovieRelation(actorId, movieId)
                                        self.relationsList.append(relation)
Example #32
    def __init__(self, env, sess, low_action_bound_list,
                 high_action_bound_list):
        self.env = env
        self.sess = sess
        self.low_action_bound_list = low_action_bound_list  # depends on the env
        self.high_action_bound_list = high_action_bound_list
        self.action_range_bound = [
            hi - lo for hi, lo in zip(self.high_action_bound_list,
                                      self.low_action_bound_list)
        ]
        self.learning_rate = 0.0001  #TODO move these to configs
        self.epsilon = 1.0
        self.epsilon_min = 0.1
        self.epsilon_decay = 1e-6
        self.gamma = 0.99
        self.tau = 0.001
        self.buffer_size = 1000000
        self.batch_size = 128
        self.theta = 0.15
        self.ou = 0
        self.sigma = 0.3

        self.state_dim = self.env.observation_space.shape[0]
        self.action_dim = len(self.low_action_bound_list)  #self.env.action_space, make this into input
        self.continuous_action_space = True

        # Initialize replay buffer
        self.replay_buffer = ReplayBuffer(self.buffer_size)

        # Creating ACTOR model
        actor_ = Actor(self.state_dim, self.action_dim, self.learning_rate)
        self.actor_state_input, self.actor_model = actor_.create_actor_model()
        _, self.target_actor_model = actor_.create_actor_model()

        self.actor_critic_grad = tf.placeholder(tf.float32,
                                                [None, self.action_dim])

        actor_model_weights = self.actor_model.trainable_weights
        self.actor_grads = tf.gradients(self.actor_model.output,
                                        actor_model_weights,
                                        -self.actor_critic_grad)

        grads = zip(self.actor_grads, actor_model_weights)
        self.optimize = tf.train.AdamOptimizer(
            self.learning_rate).apply_gradients(grads)

        # Creating CRITIC model
        critic_ = Critic(self.state_dim, self.action_dim, self.learning_rate)
        self.critic_state_input, self.critic_action_input, self.critic_model = critic_.create_critic_model(
        )
        _, _, self.target_critic_model = critic_.create_critic_model()

        self.critic_grads = tf.gradients(self.critic_model.output,
                                         self.critic_action_input)

        self.noise = OrnsteinUhlenbeckProcess(size=self.action_dim)
        self.noise.reset()

        self.sess.run(tf.global_variables_initializer())
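The placeholder wiring above implies the classic two-stage Keras/TF1 DDPG update. A sketch of the actor half; the session calls follow the attributes defined above, and the batch of states is assumed to come from replay_buffer:

    def _train_actor(self, states):
        # Ask the critic how Q changes with the action at these states...
        predicted_actions = self.actor_model.predict(states)
        grads = self.sess.run(self.critic_grads,
                              feed_dict={self.critic_state_input: states,
                                         self.critic_action_input: predicted_actions})[0]
        # ...then push that gradient through the actor's weights.
        self.sess.run(self.optimize,
                      feed_dict={self.actor_state_input: states,
                                 self.actor_critic_grad: grads})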
Example #33
    def __init__(self, input_dim, output_dim, lr, gamma, tau, alpha, clipnorm,
                 clipnorm_val, verbose):
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.actions = range(output_dim)
        self.lr = lr
        self.gamma = gamma
        self.tau = tau
        self.alpha = alpha
        self.clipnorm_val = clipnorm_val

        #Buffer for experience replay
        self.S = []
        self.A = []
        self.R = []
        self.S1 = []  #next state
        self.D = []
        self.memory_size = 2000
        self.batchsize = 32

        #Make actor and critic
        self.actor = Actor(input_dim, output_dim, lr, gamma, tau, alpha,
                           clipnorm, clipnorm_val, verbose)
        self.criticQ = CriticQ(input_dim, output_dim, lr, gamma, tau, alpha,
                               clipnorm, clipnorm_val, verbose)
        self.criticV = CriticV(input_dim, output_dim, lr, gamma, tau, alpha,
                               clipnorm, clipnorm_val, verbose)
Example #34
    def __init__(self, state_size, action_size, max_action, minibatch_size,
                 a_lr, c_lr, gamma, tau):
        self.state_size = state_size
        self.action_size = action_size
        self.max_action = max_action

        self.critic_lr = c_lr
        self.actor_lr = a_lr

        self.actor_network = Actor(self.state_size, self.action_size,
                                   self.max_action, self.actor_lr)
        self.actor_target_network = Actor(self.state_size, self.action_size,
                                          self.max_action, self.actor_lr)
        self.critic_network = Critic(self.state_size, self.action_size,
                                     self.critic_lr)
        self.critic_target_network = Critic(self.state_size, self.action_size,
                                            self.critic_lr)

        self.actor_target_network.set_weights(self.actor_network.get_weights())
        self.critic_target_network.set_weights(
            self.critic_network.get_weights())

        self.critic_optimizer = optimizers.Adam(learning_rate=self.critic_lr)
        self.actor_optimizer = optimizers.Adam(learning_rate=self.actor_lr)

        self.replay_buffer = ReplayBuffer(int(1e6))
        self.MINIBATCH_SIZE = minibatch_size
        self.GAMMA = tf.cast(gamma, dtype=tf.float64)
        self.TAU = tau
        self.noise = OUNoise(self.action_size)
Example #35
    def __init__(self, state_item_num, action_item_num, emb_dim, batch_size, tau, actor_lr, critic_lr,
                 gamma, buffer_size, item_space, summary_dir):

        self.state_item_num = state_item_num
        self.action_item_num = action_item_num
        self.emb_dim = emb_dim
        self.batch_size = batch_size
        self.tau = tau
        self.actor_lr = actor_lr
        self.critic_lr = critic_lr
        self.gamma = gamma
        self.buffer_size = buffer_size
        self.item_space = item_space
        self.summary_dir = summary_dir

        self.sess = tf.Session()

        self.s_dim = emb_dim * state_item_num
        self.a_dim = emb_dim * action_item_num
        self.actor = Actor(self.sess, state_item_num, action_item_num, emb_dim, batch_size, tau, actor_lr)
        self.critic = Critic(self.sess, state_item_num, action_item_num, emb_dim,
                             self.actor.get_num_trainable_vars(), gamma, tau, critic_lr)
        self.exploration_noise = OUNoise(self.a_dim)

        # set up summary operators
        self.summary_ops, self.summary_vars = self.build_summaries()
        self.sess.run(tf.global_variables_initializer())
        self.writer = tf.summary.FileWriter(summary_dir, self.sess.graph)

        # initialize target network weights
        self.actor.hard_update_target_network()
        self.critic.hard_update_target_network()

        # initialize replay memory
        self.replay_buffer = ReplayBuffer(buffer_size)
Example #36
    def __init__(self, point, sprite, rank):

        self.party_rank = rank
        self.ai_tag = self.set_ai_tag()

        Actor.__init__(self, point, sprite)
        self.stat_component = self.set_stats()
Example #37
 def test_get_full_attack_adds_requested_attributes(self):
     fighter = Fighter(5)
     rogue = Rogue(16)
     strength = Attribute(Attribute.STRENGTH, 16)
     dexterity = Attribute(Attribute.DEXTERITY, 14)
     actor = Actor('Rogue Fighter', [strength, dexterity], [rogue, fighter])
     full_attack = actor.get_full_attack()
     self.assertEqual(len(full_attack), 4)
     self.assertEqual(full_attack[0].value, 20)
     self.assertEqual(
         '+12 from level 16 Rogue. +5 from level 5 Fighter. +3, Strength ability score of 16. ',
         full_attack[0].audit_explanation
     )
     self.assertEqual(full_attack[1].value, 15)
     self.assertEqual(
         'Additional attack split from base BAB at a +5 breakpoint. See first attack for full audit trail.',
         full_attack[1].audit_explanation
     )
     self.assertEqual(full_attack[2].value, 10)
     self.assertEqual(
         'Additional attack split from base BAB at a +5 breakpoint. See first attack for full audit trail.',
         full_attack[2].audit_explanation
     )
     self.assertEqual(full_attack[3].value, 5)
     self.assertEqual(
         'Additional attack split from base BAB at a +5 breakpoint. See first attack for full audit trail.',
         full_attack[3].audit_explanation
     )
Example #38
    def __init__(self, name, virtual=False):
        Actor.__init__(self)

        self.name = name
        self.faction = None
        self.living = True
        self.virtual = virtual
Example #39
    def __init__(self, parent=None, mode='add', actor_id=None):
        EditorBaseFrame.__init__(self, parent)
        self.actor = Actor()
        self.mode = mode
        self.actor_id = actor_id
        if self.actor_id is not None:
            self.load_actor()

        # create widgets
        name_frame = ttk.LabelFrame(self, text='Name')
        self.name_entry = ttk.Entry(name_frame, textvariable=self.actor.get_name_var())
        alignment_frame = ttk.LabelFrame(self, text='Alignment')
        self.alignment_combobox = \
            ttk.Combobox(alignment_frame, values=('Hero', 'Anti-Hero', 'Villain', 'Civilian', 'Wild Card'),
                         textvariable=self.actor.get_alignment_var())
        self.alignment_combobox.set(self.actor.alignment)
        self.save_button = ttk.Button(self, text='Save', command=self._b_save)
        self.back_button = ttk.Button(self, text='Back', command=self._b_back)

        # grid widgets
        self.name_entry.grid()
        self.alignment_combobox.grid()
        name_frame.grid(column=0, row=0)
        alignment_frame.grid(column=1, row=0)
        self.save_button.grid(column=10, row=0)
        self.back_button.grid(column=11, row=0)
Example #40
 def test_will_save_includes_audit(self):
     wisdom = Attribute(Attribute.WISDOM, 24)
     rogue = Rogue(3)
     actor = Actor('Test Rogue With Rouge', [wisdom], [rogue])
     self.assertEqual(
         '+1, Level 3 Rogue class bonus. +7, Wisdom ability score of 24. ',
         actor.get_will_save().audit_explanation
     )
Example #41
 def __init__(self, initial_position, vector_shot_from):
     self._speed = 20
     Actor.__init__(self, Bullet.img[0], initial_position, vector_shot_from.angle)
     self._vector = Vector.product(Vector(self._speed, self._angle), vector_shot_from)
     center = self.rect.center
     self.image = Bullet.img[int(2*math.degrees(- self._angle - math.pi/2))]
     self.rect = self.image.get_rect(center=center)
     self.radius = (Bullet.img[0].get_rect().width + Bullet.img[0].get_rect().height)/4
Example #42
 def test_reflex_save_includes_audit(self):
     dexterity = Attribute(Attribute.DEXTERITY, 23)
     rogue = Rogue(2)
     actor = Actor('Test Rogue With Rouge', [dexterity], [rogue])
     self.assertEqual(
         '+3, Level 2 Rogue class bonus. +6, Dexterity ability score of 23. ',
         actor.get_reflex_save().audit_explanation
     )
Example #43
 def test_fortitude_save_includes_audit(self):
     constitution = Attribute(Attribute.CONSTITUTION, 22)
     rogue = Rogue(1)
     actor = Actor('Test Rogue With Rouge', [constitution], [rogue])
     self.assertEqual(
         '+0, Level 1 Rogue class bonus. +6, Constitution ability score of 22. ',
         actor.get_fortitude_save().audit_explanation
     )
Example #44
 def test_will_save_combines_wisdom_and_class_bonus(self):
     wisdom = Attribute(Attribute.WISDOM, 21)
     rogue = Rogue(20)
     actor = Actor('Test Rogue With Rouge', [wisdom], [rogue])
     self.assertEqual(
         rogue.get_will_save().value + wisdom.get_attribute_modifier().value,
         actor.get_will_save().value
     )
Example #45
 def test_reflex_save_combines_dexterity_and_class_bonus(self):
     dexterity = Attribute(Attribute.DEXTERITY, 13)
     rogue = Rogue(17)
     actor = Actor('Test Rogue With Rouge', [dexterity], [rogue])
     self.assertEqual(
         rogue.get_reflex_save().value + dexterity.get_attribute_modifier().value,
         actor.get_reflex_save().value
     )
Example #46
 def test_fortitude_save_combines_constitution_and_class_bonus(self):
     constitution = Attribute(Attribute.CONSTITUTION, 17)
     rogue = Rogue(19)
     actor = Actor('Test Rogue With Rouge', [constitution], [rogue])
     self.assertEqual(
         rogue.get_fortitude_save().value + constitution.get_attribute_modifier().value,
         actor.get_fortitude_save().value
     )
Example #47
	def __init__(self, screen):
		self._screen_w = screen.get_rect().width
		self._screen_h = screen.get_rect().height
		# start in dumb random place, needs changed later
		pos = (random.randint(0, self._screen_w) + self._screen_w*random.choice((-1, 1)),
			   random.randint(0, self._screen_h) + self._screen_h*random.choice((-1, 1)))	
		# Initialize an actor with these values
		Actor.__init__(self, Shooter.img[0], pos, 0)
Example #48
 def __init__(self, texto="None", x=0, y=0, magnitud=20):
     imagen = pilas.mundo.motor.obtener_texto(texto, magnitud)
     self._definir_area_de_texto(texto, magnitud)
     Actor.__init__(self, imagen, x=x, y=y)
     self.magnitud = magnitud
     self.texto = texto
     self.color = pilas.colores.blanco
     self.centro = ("centro", "centro")
     self.fijo = True
Example #49
 def test_get_base_attack_returns_sum_of_class_bab(self):
     fighter = Fighter(11)
     rogue = Rogue(4)
     actor = Actor('Fighter Rogue', [], [fighter, rogue])
     self.assertEqual(14, actor.get_base_attack_bonus().value)
     self.assertEqual(
         '+3 from level 4 Rogue. +11 from level 11 Fighter. ',
         actor.get_base_attack_bonus().audit_explanation
     )
Example #50
class TestActorClass(unittest.TestCase):
    def setUp(self):
        self.a = Actor("Best.Actor.Ever.")

    def test_get_actor_name(self):
        self.assertEqual("Best.Actor.Ever.", self.a.get_name())

    @unittest.skip("No point in adding extra actors to db. It works, trust me")
    def test_save_actor_into_database(self):
        self.assertTrue(self.a.save())
Example #51
	def next(self):
		actor = Actor("init", "actor")
		for line in self.open_file.next():
			if (line.containsActorName()):
				yield actor
				actor, film = line.getActorAndFilm()
				actor.addFilm(film)
			else:
				film = line.getFilm()
				if film is not None:
					actor.addFilm(film)
Example #52
 def __init__(self, xvel, yvel, seed, projimage):
     Actor.__init__(self)
     #self.image = pygame.image.load("").convert_alpha()
     #self.rect.x = x#
     #self.rect.y = y#
     self.projimage = projimage
     self.xvel = xvel
     self.yvel = yvel
     self.frame = 0
     self.health = 1
     random.seed(seed)
Example #53
	def restart_game(self, btn):
		
		self.isGameOver = False
		self.player = Actor("Player", False, "X")
		self.enemy = Actor("Enemy", True, "O")
		self.lstAvailableChoice = list(self.dictIndexToButtonName.keys())
		
		self.player.start_first()
		
		self.set_all_button_text("")
		self.set_all_button_disable(False)
Example #54
 def __init__(self, world, y, x, name=None):
     Actor.__init__(self, world, y, x)
     self._hp, self.hpmax = 10, 10
     self._running = None
     self.weapon = None
     self.armor = None
     self.xplvl = 1
     self._xp = 0
     if not name:
         self.generate_name()
     else:
         self.desc = name
Example #55
 def __new__(cls, *args, **kwds):
   actor = Actor(local_theatre())
   type = cls.__name__
   (actor_id, exists) = actor.theatre.globally_register_global_singleton(actor, type)
   if not exists:
     actorstate = object.__new__(cls)
     actorstate.__init__(type, type)
     actorstate.singleton = True
     actorstate.add_birth(args, kwds)
     actor.setstate(actorstate)
     actor.start()
   return Reference(actor_id)
Example #56
 def __init__(self, x, y):
     Actor.__init__(self)
     self.image = pygame.image.load("gfx/ArmPart1.png").convert_alpha()
     self.image2 = pygame.image.load("gfx/ArmPart3.png").convert_alpha()
     self.rect = self.image.get_rect()
     self.rect.width = 64
     self.rect.height = 64
     self.rect.x = x
     self.rect.y = y
     self.maxhealth = 250
     self.health = self.maxhealth
     self.damage = 2
Example #57
 def __init__(self, x, y, image):
     Actor.__init__(self)
     self.image = image
     self.rect = self.image.get_rect()
     self.rect.width = 16
     self.rect.height = 16
     self.rect.x = x
     self.rect.y = y
     self.xvel = 12
     self.yvel = 0
     self.health = -1
     self.damage = 10
Example #58
    def test_actor(self):
        a = Actor()

        a.addEvent('test_event')

        e = SensorEvent(42, 'test_event', 23)

        events = a.event(e)

        self.assertEqual(events, [e])
        self.assertEqual(a.value, 23)
        self.assertEqual(a.label, 'test_event')
        self.assertEqual(a.t, 42)
Example #59
 def __init__(self, screen, position, size, velocity):
     Actor.__init__(self, screen, position, size, velocity)
     self.friction = 0.3
     self.state = Hydrophyte.UNMARKED
     self.images = [pygame.image.load(ur).convert() for ur in Hydrophyte.imgs]
     for im in self.images:
         im.set_colorkey((0,0,0))
     self.images[0] = pygame.transform.scale(self.images[0], (2*size, 2*size))
     self.images[1] = pygame.transform.scale(self.images[1], (2*size, 2*size))
     self.image_w, self.image_h = self.images[0].get_size()
     self.phase_threshold_time = 0.5
     self.sema_drown = 2  # semaphore - when it reaches 0, the hydrophyte is drowning
     self.phase_time = 0
Example #60
def main():
    """Entry point for the application script"""

    parser = argparse.ArgumentParser(description='functionality: Spawn a ZCM Actor',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--config', nargs='?', default=None, help='Name of configuration file')
    args = vars(parser.parse_args())

    if args['config'] is not None:
        my_actor = Actor()
        my_actor.configure(args['config'])
        my_actor.run()