Example #1
import numpy as np
from ple import PLE
from ple.games.flappybird import FlappyBird  # any PLE game works; FlappyBird is assumed here

game = FlappyBird()
fps = 30                 # assumed settings; the snippet leaves these undefined
frame_skip = 2           # repeat each chosen action for this many frames
num_steps = 1
force_fps = True         # run as fast as possible rather than in real time
display_screen = True
reward = 0.0             # pickAction() reads this before the first act()
max_noops = 20
nb_frames = 15000

# make a PLE instance.
p = PLE(game,
        fps=fps,
        frame_skip=frame_skip,
        num_steps=num_steps,
        force_fps=force_fps,
        display_screen=display_screen)

# our Naive agent!
agent = NaiveAgent(p.getActionSet())

# init agent and game.
p.init()

# let's do a random number of NOOPs to randomize the start state
for i in range(np.random.randint(0, max_noops)):
    reward = p.act(p.NOOP)

# start our training loop
for f in range(nb_frames):
    # if the game is over
    if p.game_over():
        p.reset_game()

    obs = p.getScreenRGB()
    action = agent.pickAction(reward, obs)
    reward = p.act(action)

    if f % 50 == 0:
        p.saveScreen("screen_capture.png")
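
The NaiveAgent class itself is not part of this snippet; below is a minimal sketch of the interface the loop assumes (a random-action agent in the spirit of PLE's bundled example agent, so the class body here is an assumption):

import numpy as np

class NaiveAgent():
    """Dummy agent: ignores reward and observation, picks a random action."""
    def __init__(self, actions):
        self.actions = actions

    def pickAction(self, reward, obs):
        return self.actions[np.random.randint(0, len(self.actions))]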
Example #2
            print(IPost)
            print('actionIndex: %s' % actionIndex)
            print('reward: %f' % reward)
            print('rewardAfterAdapt: %f' % rewardAfterAdapt)
            print('points: %f' % points)
        if recording and step % recordingSample == 0:
            idx = step // recordingSample  # integer division: index into the preallocated trace arrays
            Trace['actionIndex'][idx] = actionIndex
            Trace['reward'][idx] = reward
            Trace['points'][idx] = points
            Trace['rewardIncrease'][idx] = rewardIncrease
            Trace['rewardAdaption'][idx] = rewardAdaption
            ADSA.Recording()
            ALIFNArray.Record()
        if saveVideo and step > StarRcordFrames:
            # save one numbered frame per step so they can be assembled into a video later
            p.saveScreen(capturePath + 'frame%.9d.png' % p.getFrameNumber())

        #%%  
    if not os.path.exists(path):
        os.makedirs(path)
    codepath = path + 'src/'
    if not os.path.exists(codepath):
        os.makedirs(codepath)
    # snapshot the experiment's source files alongside its results
    for filename in os.listdir(os.getcwd()):
        if filename.endswith(".py"):
            shutil.copy2(filename, codepath)
    NeuronNumber = 0
    # select every time point of one neuron: equivalent to [:, NeuronNumber, :]
    newSlice = (slice(None), NeuronNumber, slice(None))
    Traces = (Tracet,
              ADSA.Trace['Weighters'][newSlice],
              ADSA.Trace['WeighterVarRates'][newSlice],
              ADSA.Trace['WeighterInAxonConcentration'][newSlice],
              ADSA.Trace['WeightersCentre'][newSlice],
              ADSA.Trace['WeighterVarDamping'][newSlice],
              ADSA.Trace['EquivalentVolume'][newSlice])
#    figure1,figure2,figure3,figure4,figure5, figure6, figure7,figure8,figure9,ax = DSA.plot(TimOfRecording, Traces, path=path, savePlots=savePlots, StartTimeRate=1, linewidth= linewidth) #path=
reward = 0.0
max_noops = 20
nb_frames = 15000

# make a PLE instance.
p = PLE(game, fps=fps, frame_skip=frame_skip, num_steps=num_steps,
        force_fps=force_fps, display_screen=display_screen)

# our Naive agent!
agent = NaiveAgent(p.getActionSet())

# init agent and game.
p.init()

# let's do a random number of NOOPs to randomize the start state
for i in range(np.random.randint(0, max_noops)):
    reward = p.act(p.NOOP)

# start our training loop
for f in range(nb_frames):
    # if the game is over
    if p.game_over():
        p.reset_game()

    obs = p.getScreenRGB()
    action = agent.pickAction(reward, obs)
    reward = p.act(action)

    if f % 50 == 0:
        p.saveScreen("screen_capture.png")
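
Example #2 writes into preallocated Trace arrays at index step // recordingSample. The allocation itself is not shown in the snippet; below is a minimal sketch of how such a buffer could be set up (the nb_steps and recordingSample values are assumptions; the keys come from the snippet):

import numpy as np

nb_steps = 15000        # assumed total number of simulation steps
recordingSample = 10    # assumed sampling interval; not shown in the source
n_samples = nb_steps // recordingSample

# one slot per recorded sample for each traced quantity
Trace = {
    'actionIndex':    np.zeros(n_samples, dtype=int),
    'reward':         np.zeros(n_samples),
    'points':         np.zeros(n_samples),
    'rewardIncrease': np.zeros(n_samples),
    'rewardAdaption': np.zeros(n_samples),
}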