Code example #1
    def test_agent_query(self):
        ag1 = model.Agent('1', 'agent1', '127.0.0.1', datetime.now())
        ag1.save()
        ag2 = model.Agent('2', 'agent2', '127.0.0.2', datetime.now())
        ag2.save()
        ag3 = model.Agent('3', 'agent3', '127.0.0.3', datetime.now())
        ag3.save()

        agents = model.Agent.query()
        self.assertEqual(3, len(agents))

        agents = model.Agent.query(where='aid=?', params=['1'])
        self.assertEqual(1, len(agents))
        self.assertEqual(ag1, agents[0])

        agents = model.Agent.query(orderby='aid ASC')
        self.assertEqual(3, len(agents))
        self.assertEqual(ag1, agents[0])
        self.assertEqual(ag2, agents[1])
        self.assertEqual(ag3, agents[2])

        agents = model.Agent.query(orderby='aid ASC', offset=1, limit=2)
        self.assertEqual(2, len(agents))
        self.assertEqual(ag2, agents[0])
        self.assertEqual(ag3, agents[1])
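
Nothing in this excerpt shows how query() turns those keyword arguments into SQL. As a minimal sketch, assuming a sqlite3-style backend and a table named agent (neither is confirmed by the source), the translation might look like this:

    def build_query(table, where=None, params=None, orderby=None, limit=None, offset=None):
        # Compose "SELECT * FROM <table> [WHERE ...] [ORDER BY ...] [LIMIT ? OFFSET ?]"
        # together with the matching list of bound parameters.
        sql = 'SELECT * FROM ' + table
        args = list(params or [])
        if where:
            sql += ' WHERE ' + where
        if orderby:
            sql += ' ORDER BY ' + orderby
        if limit is not None:
            sql += ' LIMIT ? OFFSET ?'
            args += [limit, offset or 0]
        return sql, args

Under this sketch, model.Agent.query(orderby='aid ASC', offset=1, limit=2) would map to SELECT * FROM agent ORDER BY aid ASC LIMIT ? OFFSET ? with parameters [2, 1].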
Code example #2
    def test_agent_count(self):
        ag1 = model.Agent('1', 'agent1', '127.0.0.1', datetime.now())
        ag2 = model.Agent('2', 'agent2', '127.0.0.1', datetime.now())
        ag3 = model.Agent('3', 'agent3', '127.0.0.1', datetime.now())
        model.Agent.save_all([ag1, ag2, ag3])

        self.assertEqual(3, model.Agent.count())
        self.assertEqual(1, model.Agent.count(where='aid=?', params=['1']))
        self.assertEqual(2, model.Agent.count(where='aid>?', params=['1']))
        self.assertEqual(0, model.Agent.count(where='name=?', params=['1']))
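
count() presumably follows the same pattern with SELECT COUNT(*); a sketch under the same assumptions as above:

    def build_count(table, where=None, params=None):
        # Compose "SELECT COUNT(*) FROM <table> [WHERE ...]" plus its bound parameters
        sql = 'SELECT COUNT(*) FROM ' + table
        if where:
            sql += ' WHERE ' + where
        return sql, list(params or [])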
Code example #3
    def test_remove_agent(self):
        ag = model.Agent('12345678', 'agent1', '127.0.0.1', datetime.now())
        ag.save()
        agents = model.Agent.query()
        self.assertEqual(1, len(agents))
        self.assertEqual(ag, agents[0])
        ag.remove()
        agents = model.Agent.query()
        self.assertEqual(0, len(agents))
Code example #4
    def test_add_agent(self):
        aglist = model.Agent.query()
        self.assertEqual(0, len(aglist))

        ag = model.Agent('12345678', 'agent1', '127.0.0.1', datetime.now())
        ag.save()

        aglist = model.Agent.query()
        self.assertEqual(1, len(aglist))
        self.assertEqual(ag, aglist[0])
Code example #5
    def test_update_agent(self):
        ag = model.Agent('12345678', 'agent1', '127.0.0.1', datetime.now())
        ag.save()
        ag.set(last_cpu_util=99,
               last_mem_util=55.5,
               last_sys_load1=1.1,
               last_sys_cs=123)
        aglist = model.Agent.query()
        self.assertEqual(1, len(aglist))
        nag = aglist[0]
        self.assertEqual(99, nag.last_cpu_util)
        self.assertEqual(55.5, nag.last_mem_util)
        self.assertEqual(1.1, nag.last_sys_load1)
        self.assertEqual(123, nag.last_sys_cs)
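
Taken together, the five tests above imply an active-record style interface: save(), set(), and remove() on instances, plus query(), count(), and save_all() on the class. A minimal sketch of such a class, assuming a sqlite3 backing store and reusing the hypothetical build_query helper sketched earlier (the real module's schema and connection handling are not shown):

    import sqlite3

    # Assumed in-memory store; the real model module's setup is not shown
    DB = sqlite3.connect(':memory:')
    DB.execute('CREATE TABLE agent (aid TEXT PRIMARY KEY, name TEXT, host TEXT, created TEXT)')

    class Agent:
        def __init__(self, aid, name, host, created):
            self.aid, self.name, self.host, self.created = aid, name, host, str(created)

        def save(self):
            # Persist this instance as one row
            DB.execute('INSERT INTO agent VALUES (?, ?, ?, ?)',
                       (self.aid, self.name, self.host, self.created))

        def set(self, **fields):
            # Update attributes in memory, then write the changed columns back
            self.__dict__.update(fields)
            assignments = ', '.join('{}=?'.format(k) for k in fields)
            DB.execute('UPDATE agent SET {} WHERE aid=?'.format(assignments),
                       (*fields.values(), self.aid))

        def remove(self):
            DB.execute('DELETE FROM agent WHERE aid=?', (self.aid,))

        @classmethod
        def query(cls, where=None, params=None, orderby=None, limit=None, offset=None):
            sql, args = build_query('agent', where, params, orderby, limit, offset)
            return [cls(*row) for row in DB.execute(sql, args)]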
Code example #6
    def _agent_reg(self, msg):
        agent = self.find_agent(msg.agentid)
        ahostname = msg.body['hostname']
        aname = ahostname + '@' + msg.body['os']
        if agent:
            agent.set(name=aname)
            logging.info('activate existing agent %s', agent)
            # TODO activation
        else:
            agent = model.Agent(msg.agentid,
                                aname,
                                ahostname,
                                create_at=datetime.utcnow())
            agent.save()
            logging.info('new agent %s registered', agent)
            self._agents[agent.aid] = agent
        return True
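
_agent_reg only reads msg.agentid and the hostname/os entries of msg.body. A hypothetical stub for exercising the handler (the real message class is not shown in this excerpt):

    # Hypothetical stand-in for the message object _agent_reg receives
    class Msg:
        def __init__(self, agentid, hostname, osname):
            self.agentid = agentid
            self.body = {'hostname': hostname, 'os': osname}

    # e.g. handler._agent_reg(Msg('12345678', 'host1', 'Linux'))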
Code example #7
File: main.py Project: Dradoue/Racing-learning
is_renforcement = True
is_heuristique = False
is_ai = True

train = True
agent = None

if is_renforcement:
    agent = model.reload_agent_renforcement(path, reload_model,
                                            reload_nb_tentative)

if reload_model and is_renforcement:
    # Rebuild the agent from the saved checkpoint (note that this overwrites
    # the agent created by reload_agent_renforcement above)
    nb_tentative = model.get_nb_tentative(path)
    agent = model.Agent(INPUT_LEN, 4, nb_tentative)
    agent.load_model(path + "DDQN_model_cp.h5")
else:
    agent = model.Agent(INPUT_LEN, 4)

max_frame = 1200

t = 0

while train:

    t = t + 1
    pygame.event.get()
    running, score_update, checkpoint = game_loop(screen,
                                                  clock,
                                                  car,
Code example #8
File: main.py Project: mattolson93/growing_atari
print('Initializing OpenAI environment...')
if args.fskip % 2 == 0 and args.env == 'SpaceInvaders-v0':
    print("SpaceInvaders needs odd frameskip due to bullet alternations")
    args.fskip -= 1


envs = MultiEnvironment(args.env, args.batch_size, args.fskip)
action_size = envs.get_action_size()

print('Building models...')
torch.cuda.set_device(args.gpu)
if not os.path.isfile(args.agent_file):
    print("need an agent file")
    exit()
agent = model.Agent(action_size).cuda()
agent.load_state_dict(torch.load(args.agent_file, map_location=map_loc))

Z_dim = args.latent

encoder = model.Encoder(Z_dim).cuda()
generator = model.Generator(Z_dim).cuda()

encoder.train()
generator.train()
    
optim_gen = optim.Adam(generator.parameters(), lr=args.lr, betas=(0.0,0.9))
optim_enc = optim.Adam(filter(lambda p: p.requires_grad, encoder.parameters()), lr=args.lr, betas=(0.0,0.9))

print('finished building model')
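
Note that map_loc is used above without being defined in this excerpt. A minimal stand-in, assuming the intent matches the explicit map_loc dictionary in the last example below (remap tensors saved on any CUDA device onto the selected GPU):

    # Assumed: route checkpoints saved on cuda:0..cuda:15 to the chosen GPU
    map_loc = {'cuda:{}'.format(i): 'cuda:' + str(args.gpu) for i in range(16)}
    map_loc['cpu'] = 'cpu'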
Code example #9
import environment
import model
import torch
from itertools import count
from settings import DEVICE, SCREEN_WIDTH, TARGET_UPDATE, EPOCHS

# Environment Setup
world = environment.init()
world.reset()

# Log how long our agent lasts for each iteration
durations = []

# Training
agent = model.Agent(DEVICE)

for i in range(EPOCHS):
    # Initialize environment
    world.reset()

    # Get current state
    last_screen = environment.get_screen(world, SCREEN_WIDTH, DEVICE)
    current_screen = environment.get_screen(world, SCREEN_WIDTH, DEVICE)
    state = current_screen - last_screen

    for t in count():
        # Select and perform an action
        action = agent.select_action(state)
        _, reward, done, _ = world.step(action.item())
        reward = torch.tensor([reward], device=DEVICE)
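
The excerpt stops right after the reward is computed. In the standard DQN training pattern this snippet follows, the loop would go on to observe the next state, store the transition, take an optimization step, and periodically sync a target network every TARGET_UPDATE epochs. A sketch of that continuation, in which memory.push, optimize, and update_target are assumed names not confirmed by the source:

        # Observe the new state (None once the episode has ended)
        last_screen = current_screen
        current_screen = environment.get_screen(world, SCREEN_WIDTH, DEVICE)
        next_state = current_screen - last_screen if not done else None

        # Store the transition and run one optimization step (assumed agent API)
        agent.memory.push(state, action, next_state, reward)
        state = next_state
        agent.optimize()

        if done:
            durations.append(t + 1)
            break

    # Periodically copy policy-network weights into the target network
    if i % TARGET_UPDATE == 0:
        agent.update_target()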
Code example #10
print('Initializing OpenAI environment...')
if args.fskip % 2 == 0 and args.env == 'SpaceInvaders-v0':
    print("SpaceInvaders needs odd frameskip due to bullet alternations")
    args.fskip -= 1


envs = MultiEnvironment(args.env, args.batch_size, args.fskip)
action_size = envs.get_action_size()

print('Building models...')
torch.cuda.set_device(args.gpu)
if not os.path.isfile(args.agent_file):
    print("need an agent file")
    exit()
agent = model.Agent(action_size, args.agent_latent).cuda()
agent.load_state_dict(torch.load(args.agent_file, map_location=map_loc))

Z_dim = args.latent
wae_z_dim = args.wae_latent

encoder = model.Encoder(Z_dim).cuda()
generator = model.Generator(Z_dim, action_size).cuda()
discriminator = model.Discriminator(Z_dim, action_size).cuda()
Q = model.Q_net(args.wae_latent).cuda()
P = model.P_net(args.wae_latent).cuda()


Q.load_state_dict(torch.load(args.Q, map_location=map_loc))
P.load_state_dict(torch.load(args.P, map_location=map_loc))    
encoder.train()
Code example #11
    from tqdm import tqdm
    from multiprocessing import Process, Lock, Manager

    import model
    import helper

    #Remove tensorflow initialization messages
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    tf.logging.set_verbosity(tf.logging.ERROR)

    with tf.Session(config=tf.ConfigProto(device_count={'GPU': 0})) as sess:
        pass

    #Create Agent
    agent = model.Agent(
        6, 64, 0.001, "epAgent"
    )  #Model architecture set here -- (blocks, filters/block, l2reg, name)

    #Load in from previous run
    constant = 0  #Set this constant to the iteration you want to start on (default for brand new run)
    """
    agent.model = helper.load_model("bin/model.json", "bin/modelCycle30.h5")
    agent.running_model = helper.load_model("bin/model.json", "bin/modelRunAv30.h5")
    agent.elo = 720
    agent.train_count = constant
    """

    #Create chess board and initialize stockfish
    board = chess.Board()
    stockfish = chess.uci.popen_engine("stockfish")
    info_handler = chess.uci.InfoHandler()
Code example #12
File: mf_sample.py Project: jwcarr/shepard
	for participant_i in range(args.participant_i, args.participant_i + args.participants_per_job):

		# Get participant data_in and data_out
		with open(os.path.join(args.path, 'data', str(participant_i)), 'r') as file:
			data_in, data_out, participant_id, initial_hypothesis = file.read().strip().split('\n')

		# If an initial hypothesis has been set and you're using the
		# simplicity prior, construct the initial hypothesis array.
		# This is a hack to get MH to converge faster in cases where
		# we expect the agent will produce large categories (which can
		# be very time consuming to rectangularize).
		if args.prior == 'simplicity':
			initial_hypothesis = eval(initial_hypothesis)
		else:
			initial_hypothesis = None
		if initial_hypothesis is not None:
			initial_hypothesis = np.full(shape, initial_hypothesis, dtype=int)

		# Get model agent to infer a language based on participant's
		# data_in and then measure likelihood of participant's data_out
		# given agent's inferred language.
		agent = model.Agent(shape, maxcats, args.prior, args.weight, args.noise, exposures, mcmc_iterations)
		agent.learn(eval(data_in), initial_hypothesis)
		lhood = agent._likelihood(eval(data_out), agent.language)

		# Write out weight, noise, measured likelihood, and inferred
		# language to file for collection by collect.py
		with open(os.path.join(args.path, 'cand', str(args.iteration), str(participant_i)), 'w') as file:
			file.write('\n'.join(map(str, ([args.weight, args.noise, lhood, agent.language.flatten().tolist()]))))
Code example #13
    def step(self):
        # Iterate over all the agents
        for pos, agent in self.agents.items():
            # Steps are not used yet
            self.steps += 1
            # Only step agents that actually exist
            if agent:
                agent.step()

        # Get the agents that need to be updated in the dictionary
        agents_to_be_updated = []
        for pos, agent in self.agents.items():
            if agent.update is True:
                agents_to_be_updated.append(agent)

        # Update the dictionary
        for agent in agents_to_be_updated:
            self.remove_by_key(agent.old_pos)
            self.add(agent)
            agent.update = False

        # Prevent 0-agent crash
        if len(self.agents) == 0:
            print("Number of agents reached zero, exiting..")
            sys.exit()

        # Add plotting information:
        self.model.happy_plot.append(float((self.model.happy)/len(self.agents)))
        self.model.moves_plot.append(self.model.moves)
        self.model.deaths_plot.append(self.model.deaths)
        self.model.total_agents.append(len(self.agents))
        self.model.adult_agents.append((sum(agent.type == 1 for agent in self.agents.values())))
        self.model.young_agents.append((sum(agent.type == 0 for agent in self.agents.values())))
        self.model.elderly_agents.append((sum(agent.type == 2 for agent in self.agents.values())))
        self.model.similar_neighbors.append(sum(agent.similar for agent in self.agents.values())/len(self.agents))

        # Remove agents that are too old from grid
        agents_removed = {pos: agent for pos, agent in self.agents.items() if agent.destroy is True}
        for pos, agent in agents_removed.items():
            self.model.grid.remove_agent(agent)

        # Remove agents that are too old from the agents dictionary
        #self.agents = {pos: agent for pos, agent in self.agents.items() if agent.destroy is False}

        # Get the agents that need to be deleted
        to_delete = []
        for pos, agent in self.agents.items():
            if agent.destroy is True:
                to_delete.append(agent.pos)

        # Delete the agents from the dictionary
        for pos in to_delete:
            agent = self.agents[pos]
            self.remove(agent)

        # Let adults reproduce with a certain percentage
        # NOTE: Rather not have this in this step function, but cannot mutate
        # dictionary while iterating over it (if we put this function in Agent class)
        counter = 0
        for i in range(sum(agent.type == 1 for agent in self.agents.values())):
            if random.random() <= self.model.reproduction:
                if float(len(self.model.grid.empty_spots)) >= 0.05*self.model.width*self.model.height:
                    counter += 1
                    # Create agents, place them on the grid and add them to the scheduler
                    agent = model.Agent((None, None), self.model, agent_type=0, age=0)
                    self.model.grid.place_agent_on_empty(agent)
                    self.add(agent)

        # Add the number of births to the model for this epoch
        self.model.births_plot.append(counter)


        if not self.agents:
            self.model.running = False
Code example #14
def main():
    # Load models, load up an Atari game, then run (and save) every frame of the game
    args = parse_args()
    if args.missing == "none":
        args.seed = 45
    if args.missing == "agent":
        args.seed = 13


    MAX_ITERS = args.iters
    speed = args.speed
    frames_to_cf = args.frames_to_cf
    seed = args.seed
    img_dir = args.img_dir
    
    img_dir = os.path.join(img_dir, "imgs_{}_{}".format(args.enc_file[:-7]+args.enc_file[-2:], args.agent_file[-7:]))

    if args.enc_file is None or args.gen_file is None:
        print("Need to load models for the gen and enc")
        exit()

    if not os.path.isfile(args.agent_file):
        args.agent_file = args.env + ".model.80.tar"
        if not os.path.isfile(args.agent_file):
            print("bad agent_file")
            exit()


    # Remap tensors saved on any of cuda:0..cuda:15 onto the selected GPU
    map_loc = {'cuda:{}'.format(i): 'cuda:' + str(args.gpu) for i in range(16)}
    map_loc['cpu'] = 'cpu'

    if args.frame_skip % 2 == 0 and args.env == 'SpaceInvaders-v0':
        print("SpaceInvaders needs odd frameskip due to bullet alternations")
        args.frame_skip -= 1

    #run every model on all frames (4*n frames)
    print('Loading model...')
    torch.cuda.set_device(args.gpu)
    torch.manual_seed(args.seed)
    #number of updates to discriminator for every update to generator
    envs = MultiEnvironment(args.env, 1, args.frame_skip)
    

    agent = model.Agent(envs.get_action_size(), args.agent_latent).cuda() #cuda is fine here cause we are just using it for perceptual loss and copying to discrim
    agent.load_state_dict(torch.load(args.agent_file))
    encoder = model.Encoder(args.latent).cuda()
    generator = model.Generator(args.latent, envs.get_action_size()).cuda()
    Q = model.Q_net(args.wae_latent).cuda()
    P = model.P_net(args.wae_latent).cuda()


    Q.load_state_dict(torch.load(args.Q, map_location=map_loc))
    P.load_state_dict(torch.load(args.P, map_location=map_loc))    
    encoder.load_state_dict(torch.load(args.enc_file, map_location=map_loc))
    generator.load_state_dict(torch.load(args.gen_file, map_location=map_loc))

    encoder.eval()
    generator.eval()
    Q.eval()
    P.eval()
    os.makedirs(img_dir, exist_ok=True)
    '''def rm_file(filename):
        if os.path.exists(filename): os.remove(os.path.join(img_dir,filename))
    rm_file('log.txt')
    rm_file('probabilities.csv')
    rm_file('every_action.csv')
    

    #printlog('frame, entropy, max_less_avg, advantage_max_less_avg, l2_z, first_step, action, distance, iterations, Q[a], avg_step_size, l2_mag_cf, p_delta_start_action, p_delta_target_action, gold_delta_start_action, gold_delta_target_action', 'every_action.csv')
    printlog("frame, probabilty_array, entropy, Qs, average(Qs), advantage_max_less_avg", img_dir, 'probabilities.csv')
    '''
    print('finished loading models')


    run_game(encoder, generator, agent, Q, P, envs, seed, img_dir, args.missing, frames_to_cf, speed, MAX_ITERS, args.cf_all_actions, args.salient_intensity, args.last_frame_diff)
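
Presumably the script is launched through the usual entry-point guard, which the excerpt does not show:

    if __name__ == '__main__':
        main()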