Exemple #1
0
    def __init__(self, memory, shared, semaphore):
        """Set up a DQN training worker process.

        Args:
            memory:    replay buffer shared with the data-collecting process.
            shared:    manager dict of shared resources ({'memory', 'SENT_FLAG'}).
            semaphore: guards concurrent access to the shared resources.
        """
        multiprocessing.Process.__init__(self)

        # Training hyperparameters.
        self.TRAIN_MAX = 10
        self.TRANSFER = 10      # target-network sync interval (in updates)
        self.BATCH_SIZE = 128
        self.GAMMA = 0.99       # discount factor
        self.SAMPLE_S = 5.0     # sampling parameters for replay selection
        self.SAMPLE_Q = 1.0

        # RMSprop settings (classic DQN-style optimizer configuration).
        lr = 0.00025
        grad_momentum = 0.95
        sq_grad_momentum = 0.95
        min_sq_grad = 0.01

        # Online and target networks; start from identical weights.
        self.net = DQN()
        self.targetNet = DQN()
        self.copy_weights()
        self.net.setOptimizer(
            optim.RMSprop(self.net.parameters(),
                          lr=lr,
                          momentum=grad_momentum,
                          alpha=sq_grad_momentum,
                          eps=min_sq_grad))

        self.memory = memory
        self.shared = shared  # shared resources, {'memory', 'SENT_FLAG'}
        self.semaphore = semaphore
Exemple #2
0
 def __init__(self, inputs):
     """Initialize the demo DQN worker process.

     Args:
         inputs: input source handed to the worker (queue/pipe-like object).
     """
     mp.Process.__init__(self)

     # Training hyperparameters.
     self.BATCH_SIZE = 32
     self.TRAIN_MAX = 500
     self.TRANSFER = 100   # target-network sync interval (in updates)
     self.GAMMA = 1.0      # undiscounted returns

     # RMSprop settings (classic DQN-style optimizer configuration).
     lr = 0.00025
     grad_momentum = 0.95
     sq_grad_momentum = 0.95
     min_sq_grad = 0.01

     # Online and target networks; start from identical weights.
     self.demonet = DQN()
     self.targetnet = DQN()
     self.copy_weights()
     self.demonet.setOptimizer(
         optim.RMSprop(self.demonet.parameters(),
                       lr=lr,
                       momentum=grad_momentum,
                       alpha=sq_grad_momentum,
                       eps=min_sq_grad))

     self.inputs = inputs
Exemple #3
0
    def __init__(self, shared):
        """Set up a DQN training worker process.

        Args:
            shared: shared resources dict ({'memory', 'SENT_FLAG'}).
        """
        multiprocessing.Process.__init__(self)

        # Training hyperparameters.
        self.TRAIN_MAX = 500
        self.TRANSFER = 100   # target-network sync interval (in updates)
        self.BATCH_SIZE = 32
        self.GAMMA = 1.0      # undiscounted returns

        # RMSprop settings (classic DQN-style optimizer configuration).
        lr = 0.00025
        grad_momentum = 0.95
        sq_grad_momentum = 0.95
        min_sq_grad = 0.01

        # Online and target networks; start from identical weights.
        self.net = DQN()
        self.targetNet = DQN()
        self.copy_weights()
        self.net.setOptimizer(
            optim.RMSprop(self.net.parameters(),
                          lr=lr,
                          momentum=grad_momentum,
                          alpha=sq_grad_momentum,
                          eps=min_sq_grad))

        self.shared = shared  # shared resources, {'memory', 'SENT_FLAG'}
Exemple #4
0
#from Env.Environment import Environment
from Env.gymEnv_V2 import myGym
#from Env.gymEnv import myGym
from DQN.Improver_Q_Learning import Improver
from DQN.Evaluator_Dense_Q_Learning import Evaluator
from DQN.ReplayMemory import ReplayMemory
import os

# Work around numpy import resetting CPU affinity to a single core, which
# would defeat multiprocessing.
os.system(
    "taskset -p 0xff %d" % os.getpid()
)  #https://stackoverflow.com/questions/15639779/why-does-multiprocessing-use-only-a-single-core-after-i-import-numpy

if __name__ == '__main__':
    # Replay-buffer capacity (hyperparameter).
    MEMORY_SIZE = 5000

    imp_net = DQN()  # network the improver collects experience with

    # Manager-backed structures so improver and evaluator processes share
    # the replay list and the weight-handoff dict.
    manager = SyncManager()
    manager.start()
    s = multiprocessing.Semaphore(1)
    # FIX: the original constructed ReplayMemory(MEMORY_SIZE) here and then
    # immediately rebound `memory` to manager.list(), silently discarding
    # the buffer — the dead allocation is removed.
    memory = manager.list()
    shared = manager.dict({'SENT_FLAG': True, 'weights': None})

    improver = Improver(imp_net, MEMORY_SIZE, memory, shared, myGym(), s)
    # improver is executed by the main process
    evaluator = Evaluator(memory, shared, s)
Exemple #5
0
from multiprocessing.managers import SyncManager
import torch.nn as nn
import time
from DQN.DQNcartpole import DQN
from Env.gymEnv import myGym
from DQN.CartPoleDQN import CartPoleDQN
from DQN.ReplayMemory import ReplayMemory

if __name__ == '__main__':
    # Single-process run: the replay buffer and flags live in a plain dict
    # (no manager/IPC needed).
    memory = ReplayMemory(10000)
    # FIX: removed the unused `demonet = DQN()` (a fresh DQN() is passed to
    # CartPoleDQN below) and the redundant dict() wrapper around a literal.
    shared = {'memory': memory, 'SENT_FLAG': True, 'weights': None}

    p = CartPoleDQN(DQN(), shared, myGym())
    p.run()