Example #1
 def file_in(self):
     self.statusbar.showMessage('正在导入数据,请稍等.........')
     fileDialog = QFileDialog(self, filter="文本文件 (*.txt)")
     fileDialog.setWindowModality(Qt.WindowModal)
     fileDialog.exec_()
     if len(fileDialog.selectedFiles()) != 0:
         txtPath = fileDialog.selectedFiles()[0]
         self.set_in_widget_visible(True)
         self.thread = Worker(txtPath, self.path,
                              self.id_category_first_max,
                              self.id_webAddress_max, self)
         self.thread.signal_change_value.connect(
             lambda value: self.progressBar.setValue(value))
         self.thread.signal_update.connect(self.data_update)
         self.thread.signal_change_statusbar.connect(
             lambda message, time: self.statusbar.showMessage(
                 message, time))
         self.thread.signal_set_in_widget_visiable.connect(
             self.set_in_widget_visible)
         self.thread.signal_critical.connect(lambda: QMessageBox.critical(
             self, '错误', '文本内容格式错误,请选择正确文件', QMessageBox.Ok))
         self.thread.start()
     else:
         self.statusbar.showMessage('')
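
For context, this handler assumes Worker is a QThread subclass exposing the signals it connects to. The sketch below is a hypothetical minimal version of such a class (PyQt5 assumed); the signal names are taken from the snippet, while the constructor arguments and the run() body are placeholders rather than the original implementation.

from PyQt5.QtCore import QThread, pyqtSignal

class Worker(QThread):
    # Signal names mirror the connections made in file_in(); payload types are assumptions.
    signal_change_value = pyqtSignal(int)             # progress bar value
    signal_update = pyqtSignal()                      # ask the UI to refresh its data
    signal_change_statusbar = pyqtSignal(str, int)    # message, timeout in ms
    signal_set_in_widget_visiable = pyqtSignal(bool)  # show/hide the import widgets
    signal_critical = pyqtSignal()                    # report a malformed input file

    def __init__(self, txt_path, db_path, id_category_max, id_web_max, parent=None):
        super().__init__(parent)
        self.txt_path = txt_path

    def run(self):
        # Placeholder: parse self.txt_path and report progress while doing so.
        for percent in range(0, 101, 10):
            self.signal_change_value.emit(percent)
        self.signal_update.emit()
        self.signal_set_in_widget_visiable.emit(False)
        self.signal_change_statusbar.emit('Import finished', 5000)
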
Example #2
    def on_message(self, message):
        if len(message) > 4 and message[0:4] == "CODE":
            message = message.split('-')
            self.codeTransmitted = ""
            self.className = message[1]
            self.waitLinesCode = True

        elif message == "ENDCODE":
            self.waitLinesCode = False
            print(self.codeTransmitted)
            with open("./module/process.py", 'w+', encoding='utf-8') as fd:
                fd.write(self.codeTransmitted)

            self.codeTransmitted = None

        elif message == "DATA":
            self.waitData = True
            self.data = ""

        elif message == "ENDDATA":
            self.waitData = False
            self.data = eval(self.data)
            print(self.data)

        elif message == "RUN":
            clazz = getattr(importlib.import_module("module.process"),
                            self.className)
            process = clazz()
            self.worker = Worker(self.data, process, self)
            self.worker.start()

        elif self.waitData:
            self.data += message

        elif self.waitLinesCode:
            self.codeTransmitted += message + "\n"
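
The elif chain above implements a small line-oriented protocol: a CODE-<ClassName> header, raw source lines, ENDCODE, then DATA / ENDDATA framing a Python literal, and finally RUN. The sender sketch below only illustrates that sequence; the send callable is an assumption standing in for whatever transport delivers messages to on_message.

def transmit(send, class_name, source_lines, data_literal):
    # Hypothetical sender; 'send' pushes one message to on_message.
    send("CODE-" + class_name)   # announce a code transfer and the class to load
    for line in source_lines:
        send(line)               # accumulated into codeTransmitted
    send("ENDCODE")              # handler writes ./module/process.py
    send("DATA")                 # start of the data payload
    send(data_literal)           # a Python literal; the handler eval()s it
    send("ENDDATA")
    send("RUN")                  # handler imports the class and starts a Worker
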
Example #3
    def __performTests(self):
        EARepository = PopulationRepository()
        PSORepository = PopulationRepository()
        acoProblem = ACOProblem(7, 1000, 40, 1.9, 0.9, 0.05, 0.5)

        self.__evolutionaryController = EvolutionaryController(
            EARepository, 7, 40, 0.5, 1000)
        self.__hillClimbingController = HillClimbingController(7, 1000)
        self.__psoController = PSOController(PSORepository, 7, 100, 50, 0.5,
                                             1.1, 1.5, 10)
        self.__acoController = ACOController(acoProblem)

        self.__readInputEvolutionary()
        self.__readInputHillClimbing()
        self.__readInputPSO()
        self.__readInputACO()

        worker = Worker(self.__doTheTests)
        #worker.signals.result.connect(self.__printTestResults)
        worker.signals.finished.connect(self.__enableButtons)

        self.__disableButtons()

        self.threadpool.start(worker)
Example #4
    def __init__(self):
        self.tableQueue = queue.Queue()
        self.keyBindQueue = queue.Queue()
        self.keyPressQueue = queue.Queue()
        self.apps = None

        QtCore.QThread.currentThread().setObjectName("MAIN")
        self.thread = QtCore.QThread()
        self.thread.name = "auto_refresh"
        self.worker = Worker(self.tableQueue)
        self.worker.moveToThread(self.thread)
        self.worker.update_table.connect(self.updateTable)

        self.threadTwo = QtCore.QThread()
        self.threadTwo.name = "key_searcher"
        self.keyboardShortcuts = keyboardShortcuts(self.keyBindQueue,
                                                   self.keyPressQueue)
        self.keyboardShortcuts.moveToThread(self.threadTwo)
        self.keyboardShortcuts.new_key_press.connect(self.ChangeVolume)
        self.session = []
        self.appKeyMap = {}
        self.comboMap = {}
        self.keyboardShortcuts.start()
        self.worker.start()
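
This constructor uses the QObject-moved-to-QThread pattern. A compatible Worker might look like the hypothetical sketch below (PyQt5 assumed): the update_table signal name comes from the snippet, but the queue polling and the start() method are guesses. With moveToThread, the worker's slots run in the target thread once that thread's event loop is running.

from PyQt5 import QtCore

class Worker(QtCore.QObject):
    # Hypothetical QObject worker compatible with the moveToThread() call above.
    update_table = QtCore.pyqtSignal(list)  # payload type is an assumption

    def __init__(self, table_queue, parent=None):
        super().__init__(parent)
        self.table_queue = table_queue
        self._timer = None

    def start(self):
        # Poll the shared queue periodically from the thread that owns this object.
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self._poll)
        self._timer.start(1000)

    def _poll(self):
        while not self.table_queue.empty():
            self.update_table.emit(self.table_queue.get_nowait())
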
Example #5
    def start(self):
        tags = self.lineEdit_tags.text()
        path = self.lineEdit_savepath.text()
        start_page = self.lineEdit_startpage.text()
        if start_page == '':
            start_page = 1

        self.thread = Worker(tags, path, start_page, token, chan_cookies)
        self.thread.set_processbar.connect(self.set_processbar)
        self.thread.log_append.connect(self.log_append)
        self.thread.log_moveCursor.connect(self.log_moveCursor)
        self.thread.enable_start_button.connect(self.enable_start_button)
        self.thread.disable_start_button.connect(self.disable_start_button)
        self.thread.enable_stop_button.connect(self.enable_stop_button)
        self.thread.disable_stop_button.connect(self.disable_stop_button)
        self.thread.set_infolabel.connect(self.set_infolabel)
        self.thread.set_speedlabel.connect(self.set_speedlabel)
        self.thread.set_startbutton_text.connect(self.set_startbutton_text)
        self.thread.info_message.connect(self.info_message)
        self.thread.notifications.connect(self.notifications)
        self.thread.set_process.connect(self.set_process)
        self.thread.save_failed_list.connect(self.save_failed_list)

        self.thread.start()
Example #6
# create and start threads
threads = []
n_threads = 4

fnos = list(range(0, 3000, 10))
n_threads = 1  # number of worker threads used to read video frames
tasks = [[] for _ in range(0, n_threads)]  # one list of frame numbers per thread
frame_per_thread = math.ceil(len(fnos) / n_threads)

tid = 0
for idx, fno in enumerate(fnos):
    tasks[math.floor(idx / frame_per_thread)].append(fno)

for _ in range(0, n_threads):
    w = Worker()
    threads.append(w)
    w.start()

results = queue.Queue(maxsize=100)
on_done = lambda x: results.put(x)
# distribute the tasks from main to worker threads
for idx, w in enumerate(threads):
    w.decode(video_path, tasks[idx], on_done)


def analyze_gaze(frame):
    gaze.refresh(frame)

    frame = gaze.annotated_frame()
    text = ""
Example #7
from functions import handle_new_page


def initialize_logger(log_level: str):
    logging_level = logging.getLevelName(log_level)
    logging_format = '%(asctime)s %(module)s:%(lineno)d %(levelname)-8s %(message)s'
    logging.basicConfig(level=logging_level, format=logging_format)


if __name__ == "__main__":
    cm = ConfigManager()
    crawler_config = cm.get_crawler_params()
    initialize_logger(crawler_config['log_level'])

    logging.warning('Running with params:')
    pprint(crawler_config)

    seed_urls = crawler_config['seed_urls']
    coredb = CoreDb()
    for url in seed_urls:
        handle_new_page(coredb, url, crawler_config, Worker.locks)

    threads = list()
    for index in range(crawler_config['workers']):
        x = Worker(crawler_config)
        threads.append(x)
        x.start()

    for index, thread in enumerate(threads):
        thread.join()
Example #8
#!/usr/bin/env python3

from Worker import Worker
from time import sleep


def function(inputs):
    sleep(3)
    return {"res": inputs['key2'] * 2}


worker = Worker(5, 1)

worker.start(function)
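
The Worker module itself is not shown, so the call Worker(5, 1) is open to interpretation; one plausible reading is a pool size and a polling interval in seconds. The sketch below is purely an assumption of what such a class could look like, with start(fn) spawning threads that apply fn to queued input dicts.

import queue
import threading

class Worker:
    # Hypothetical implementation matching Worker(n_threads, poll_interval)
    # and worker.start(function); not the module used in the example.
    def __init__(self, n_threads, poll_interval=1):
        self.n_threads = n_threads
        self.poll_interval = poll_interval
        self.jobs = queue.Queue()
        self.results = queue.Queue()

    def submit(self, inputs):
        self.jobs.put(inputs)

    def _loop(self, fn):
        while True:
            try:
                inputs = self.jobs.get(timeout=self.poll_interval)
            except queue.Empty:
                continue
            self.results.put(fn(inputs))
            self.jobs.task_done()

    def start(self, fn):
        # Non-daemon threads keep the process alive, matching the script above
        # where worker.start(function) is the last statement.
        for _ in range(self.n_threads):
            threading.Thread(target=self._loop, args=(fn,)).start()
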
Example #9
                                  name='global_episodes',
                                  trainable=False)
    trainer = tf.train.RMSPropOptimizer(learning_rate=7e-4,
                                        epsilon=0.1,
                                        decay=0.99)
    master_network = Network(height, width, depth, s_size, a_size, 'global',
                             None)  # Generate global network
    num_workers = multiprocessing.cpu_count()  # Set workers to number of available CPU threads

    print('Creating', num_workers, 'workers')
    workers = []
    # Create worker classes
    for i in range(num_workers):
        workers.append(
            Worker(game_env, i, (height, width, depth, s_size), a_size,
                   trainer, model_path, global_episodes))
    saver = tf.train.Saver(max_to_keep=5)

with tf.Session() as sess:
    coord = tf.train.Coordinator()

    if load_model:
        print('Loading Model...')
        ckpt = tf.train.get_checkpoint_state(model_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())

    # This is where the asynchronous magic happens.
    # Start the "work" process for each worker in a separate thread.
    worker_threads = []
Example #10
#!/usr/bin/env python

from Worker import Worker 
from time import sleep

def function(inputs):
  sleep(3)
  return {
    "res": inputs['key2'] * 2
  }

worker = Worker(5,)


worker.start(function)

Example #11
    def updateBerita(self, beginDate, endDate):
        """
        Add news entry data.

        Input:
        beginDate -- integer
        Start of the news search range, in the format
        beginDate = beginYear * 10000 + beginMonth * 100 + beginDay

        endDate -- integer
        End of the news search range, in the format
        endDate = endYear * 10000 + endMonth * 100 + endDay

        Output:
        An integer indicating how many news entries were added.
        """
        if beginDate > endDate:
            raise ValueError(
                "beginDate (%d) harus secara kronologis sebelum endDate (%d)" %
                (beginDate, endDate))

        beginDate = datetime.datetime(beginDate // 10000,
                                      beginDate // 100 % 100, beginDate % 100)
        endDate = datetime.datetime(endDate // 10000, endDate // 100 % 100,
                                    endDate % 100)

        for I in FileParser.FileParser.parserList:
            parser = FileParser.FileParser(I)
            print(I)
            date = beginDate
            worker = Worker()
            while date <= endDate:
                newsList = parser.extractPublikasi(date.year * 10000 +
                                                   date.month * 100 + date.day)
                print(date.year * 10000 + date.month * 100 + date.day)
                for J in newsList:
                    # Check whether this article has already been crawled
                    self.c.execute("SELECT * FROM berita where Url = ?",
                                   (J['url'], ))
                    d = self.c.fetchall()
                    # If not, add it to the download queue
                    if len(d) == 0:
                        worker.addOrder(J['url'])
                result = worker.getData()
                print(len(result))
                for J in newsList:
                    if J['url'] in result:
                        r = result[J['url']]
                        nextTwoSession = self.cariSesi(J['tanggal'], J['jam'])
                        # Positive sentiment = 1
                        # Neutral sentiment = 0
                        # Negative sentiment = -1

                        # As a first pass, every article published while the price
                        # rose is assumed to carry positive sentiment, and every
                        # article published while the price fell negative sentiment.
                        sentiment = int(
                            self.bandingIndeks(nextTwoSession) > 0) * 2 + -1
                        d = parser.extractData(r)
                        if d is not None:
                            self.c.execute(
                                "INSERT INTO berita VALUES (?,?,?,?,?,?,?,?)",
                                (J['sumber'], J['judul'], J['url'], d, '',
                                 J['tanggal'], J['jam'], sentiment))
                date += datetime.timedelta(1)
                worker.reset()
                self.conn.commit()
Example #12
'''
@author: vbegun
'''

from Worker import Worker

if __name__ == '__main__':
    pass

bob = Worker('Bob Smith', 50000)
print(bob.getPay())
Example #13
 if PREVIOUS_SESSION:
     for i, (name, net) in enumerate(zip(workernames, networks)):
         env = CONFIG.env
         saver_ = saver
         if i != 0:
             saver_ = None
             env = getEnv(CONFIG.GAME, CONFIG.INIT_PORT + i, CONFIG.N_A,
                          CONFIG.ACTION_REPEAT, CONFIG.RANDOM_SEED)
         else:
             print("first worker, we reuse existing env used for init")
         workers.append(
             Worker(name,
                    net,
                    SESS,
                    COORD,
                    CONFIG,
                    unique_mutex,
                    summary_writer,
                    env=env,
                    saver=saver_))
 else:
     for i, (name, net) in enumerate(zip(workernames, networks)):
         env = CONFIG.env
         saver_ = saver
         if i != 0:
             saver_ = None
             env = getEnv(CONFIG.GAME, CONFIG.INIT_PORT + i + 1, CONFIG.N_A,
                          CONFIG.ACTION_REPEAT, CONFIG.RANDOM_SEED)
         workers.append(
             Worker(name,
                    net,
Example #14
#!/usr/local/bin/python3
from Worker import Worker

if __name__ == '__main__':
    worker = Worker()
    worker.consume()
Example #15
 def add_worker(self):
     worker = Worker(len(self.workers), self)
     self.workers.append(worker)
     self.workers[-1].start()
     return worker
Example #16
    grad_queue = mp.Queue()
    rewards_queue = mp.Queue(maxsize=benchmark_freq)
    rewards_list = []

    # Worker Params
    n_steps = 5
    frame_dimensions = [84, 84]
    gamma = 0.99
    env_name = "MsPacman-v0"

    print("Starting to create {} workers...".format(num_workers))
    workers = []
    for i in range(num_workers):
        color = colours[i % len(colours)]
        worker_id = "worker_{}".format(i + 1)
        worker = Worker(worker_id, env_name, network, n_steps, gamma, color,
                        grad_queue, rewards_queue)
        workers.append(worker)

    threads = []
    for worker in workers:
        t = mp.spawn(run,
                     args=(worker, ),
                     nprocs=1,
                     join=False,
                     daemon=False,
                     start_method='spawn')
        threads.append(t)

    grads = None
    grad_count = 0
    batch_size = 64
Example #17
    summary_writer = []
    for id in range(n_threads):
        summary_writer.append(tf.summary.FileWriter(args.summary_dir+'/worker_'+str(id)))

    summary_parameters = Summary_Parameters()
    write_op = tf.summary.merge_all()
    master_network = A3C_Network(args, no_action, 'master_network')
    workers = []
    env_list = []
    for id in range(n_threads):
        env = gym.make(args.environment)

        if id == 0:
            env = gym.wrappers.Monitor(env, "monitors", force=True)

        workers.append(Worker(global_episodes, training_episodes, master_network, id, learning_rate, env, summary_writer[id],
                              summary_parameters, write_op, args))
        env_list.append(env)


    with tf.Session() as sess:
        saver = tf.train.Saver(max_to_keep=5)
        master_network.load_model(sess, saver)
        coord = tf.train.Coordinator()
        thread_list = []
        for id in range(n_threads):
            t = threading.Thread(target=workers[id].process, args=(sess, coord, saver))
            t.start()
            sleep(0.5)
            thread_list.append(t)

        coord.join(thread_list)
Example #18
if __name__ == "__main__":

    startTime = time.time()

    #conf = SparkConf().setMaster("local").setAppName("Test")
    conf = SparkConf().setMaster("spark://localhost:7077").setAppName("Test")

    # sc = SparkContext(conf=conf, pyFiles=["/home/crist/PycharmProjects/DistributedTest/dependencies.zip"])
    sc = SparkContext(conf=conf)

    scenarioSize = 20

    workerList = []
    scenarioList = []
    for i in range(scenarioSize):
        workerList.append(Worker(i))
        scenarioList.append("Scenario " + str(i))

    parallelWorkerList = sc.parallelize(zip(workerList,
                                            scenarioList)).persist()

    for x in range(20):
        var1 = randint(100, 10000)
        var2 = randint(100, 10000)
        print("Initializing iteration " + str(x))

        print("Updating variables: ")
        print("\tvar1: " + str(var1))
        print("\tvar2: " + str(var2))

        broadcast1 = sc.broadcast(var1)
Example #19
                          momentum=0.9)
    lr_scheduler = optim.lr_scheduler.StepLR(optimizer,
                                             step_size=8000,
                                             gamma=0.1)

# resume from last time if necessary:
epoch_offset = 0
step_offset = 0
if ckpt_dir:
    ckpt = torch.load(ckpt_dir, map_location=device)
    epoch_offset = ckpt['epoch_offset']
    step_offset = ckpt['step_offset']
    model.load_state_dict(ckpt['state_dict'])
if optim_load:
    optimizer.load_state_dict(ckpt['optim'])
print('resume from epoch: {}, step: {}'.format(epoch_offset - 1,
                                               step_offset - 1))

# just before work: setup logger
log_folder = log_folder if log_folder else str(
    datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
writer = SummaryWriter(log_root + '/{}'.format(log_folder)
                       )  # TensorboardX: https://zhuanlan.zhihu.com/p/35675109

# work, the main loop!
worker = Worker(model, optimizer, lr_scheduler, criterion, epochs,
                epoch_offset, step_offset, train_loader, val_loader,
                test_loader, device, writer, log_root, log_folder)

print(worker.work())
Example #20
with tf.device("/cpu:0"):
    global_episodes = tf.Variable(0,
                                  dtype=tf.int32,
                                  name='global_episodes',
                                  trainable=False)
    trainer = tf.train.AdamOptimizer(learning_rate=1e-4)
    master_network = AC_Network(s_size, a_size, 'global',
                                None)  # Generate global network
    num_workers = multiprocessing.cpu_count()  # Set workers to number of available CPU threads

    workers = []
    # Create worker classes
    for i in range(int(num_workers / 2)):
        workers.append(
            Worker(Environment(), i, s_size, a_size, trainer, model_path,
                   global_episodes))
    saver = tf.train.Saver(max_to_keep=5)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    if load_model:
        print('Loading Model...')
        ckpt = tf.train.get_checkpoint_state(model_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.global_variables_initializer())

    # Start the "work" process for each worker in a separate thread.
    worker_threads = []
    for worker in workers:
        worker_work = lambda: worker.work(max_episode_length, gamma, sess,
Example #21
 def __init__(self, conf_file):
     self.worker = Worker(conf_file)
Example #22
    def multi_solve_environment(self):
        workers_top20 = []

        for arch_epoch in range(self.arch_epochs):
            results_queue = Queue()
            processes = []

            for episode in range(self.episodes):
                actions_p, actions_log_p, actions_index = self.controller.sample()
                actions_p = actions_p.cpu().numpy().tolist()
                actions_log_p = actions_log_p.cpu().numpy().tolist()
                actions_index = actions_index.cpu().numpy().tolist()

                if episode < self.episodes // 3:
                    worker = Worker(actions_p, actions_log_p, actions_index,
                                    self.args, 'cuda:0')
                elif self.episodes // 3 <= episode < 2 * self.episodes // 3:
                    worker = Worker(actions_p, actions_log_p, actions_index,
                                    self.args, 'cuda:1')
                else:
                    worker = Worker(actions_p, actions_log_p, actions_index,
                                    self.args, 'cuda:3')

                process = Process(target=consume, args=(worker, results_queue))
                process.start()
                processes.append(process)

            for process in processes:
                process.join()

            workers = []
            for episode in range(self.episodes):
                worker = results_queue.get()
                worker.actions_p = torch.Tensor(worker.actions_p).to(
                    self.device)
                worker.actions_index = torch.LongTensor(
                    worker.actions_index).to(self.device)
                workers.append(worker)

            for episode, worker in enumerate(workers):
                if self.baseline is None:
                    self.baseline = worker.acc
                else:
                    self.baseline = self.baseline * self.baseline_weight + worker.acc * (
                        1 - self.baseline_weight)

            # sort workers and keep the top 20
            workers_total = workers_top20 + workers
            workers_total.sort(key=lambda worker: worker.acc, reverse=True)
            workers_top20 = workers_total[:20]
            top1_acc = workers_top20[0].acc
            top5_avg_acc = np.mean(
                [worker.acc for worker in workers_top20[:5]])
            top20_avg_acc = np.mean([worker.acc for worker in workers_top20])
            logging.info(
                'arch_epoch {:0>3d} top1_acc {:.4f} top5_avg_acc {:.4f} top20_avg_acc {:.4f} baseline {:.4f} '
                .format(arch_epoch, top1_acc, top5_avg_acc, top20_avg_acc,
                        self.baseline))
            for i in range(5):
                print(workers_top20[i].genotype)

            for ppo_epoch in range(self.ppo_epochs):
                loss = 0

                for worker in workers:
                    actions_p, actions_log_p = self.controller.get_p(
                        worker.actions_index)

                    loss += self.cal_loss(actions_p, actions_log_p, worker,
                                          self.baseline)

                loss /= len(workers)
                logging.info('ppo_epoch {:0>3d} loss {:.4f} '.format(
                    ppo_epoch, loss))

                self.adam.zero_grad()
                loss.backward()
                self.adam.step()
Example #23
                     default=10,
                     help="Update the Tensorboard every X steps.")

args = parser.parse_args()

tf.reset_default_graph()

model_path = os.path.join('./model/', args.game_name)
if not os.path.exists(model_path):
    os.makedirs(model_path)

# create openai game
env = gym.make(args.game_name)

n_actions = env.action_space.n
global_frame_counter = 0
with tf.Session() as sess:

    with tf.device("/cpu:0"):
        # create global network
        global_network = A3C_Network(
            args, n_actions, trainer=None,
            scope=GLOBAL_NETWORK_NAME)  # Generate global network

        worker = Worker(args, 0, model_path, None, global_network, None, None,
                        None)

        sess.run(tf.global_variables_initializer())

        worker.evaluate(sess)
Example #24
    def start(self):
        workers = []
        network_params = (self.NUM_STATE,
                          self._config['Max steps taken per batch'],
                          self.NUM_ACTIONS, self.ACTION_SPACE)

        # Init Global and Local networks. Generate Weights for them as well.
        if self._config['CNN type'] == '':
            self._global_model = AC_Model_Large(self.NUM_STATE,
                                                self.NUM_ACTIONS,
                                                self._config,
                                                is_training=True)
            self._global_model(
                tf.convert_to_tensor(np.random.random((1, *self.NUM_STATE)),
                                     dtype='float64'))
            self._step_model = AC_Model_Large(self.NUM_STATE,
                                              self.NUM_ACTIONS,
                                              self._config,
                                              is_training=True)
            self._step_model(
                tf.convert_to_tensor(np.random.random((1, *self.NUM_STATE)),
                                     dtype='float64'))
        else:
            self._global_model = CNN_class_import(
                self._config['CNN type'],
                (self.NUM_STATE, self.NUM_ACTIONS, self._config, True))
            self._global_model(
                tf.convert_to_tensor(np.random.random((1, *self.NUM_STATE)),
                                     dtype='float64'))
            self._step_model = CNN_class_import(
                self._config['CNN type'],
                (self.NUM_STATE, self.NUM_ACTIONS, self._config, True))
            self._step_model(
                tf.convert_to_tensor(np.random.random((1, *self.NUM_STATE)),
                                     dtype='float64'))

        # Load model if exists
        if not os.path.exists(self._model_save_path):
            os.makedirs(self._model_save_path)
        else:
            try:
                if os.path.exists(os.path.join(self._model_save_path, "checkpoint")):

                    self._global_model.load_model_weights()
                    self._step_model.load_model_weights()
                    for env in self._envs:
                        workers.append(
                            Worker(self._step_model,
                                   env,
                                   batch_size=self._config['Max steps taken per batch'],
                                   render=False))

                    print("Model restored.")

                else:

                    for env in self._envs:
                        workers.append(
                            Worker(self._step_model,
                                   env,
                                   batch_size=self._config['Max steps taken per batch'],
                                   render=False))

                    print("Creating new model.")
            except:
                print("ERROR: There was an issue loading the model!")
                raise

        coordinator = Coordinator(self._global_model, self._step_model,
                                  workers, self._plot, self._model_save_path,
                                  self._config)

        # Train and save
        try:
            if coordinator.run():
                try:
                    self._global_model.save_model_weights()
                    print("Model saved.")
                    return True
                except:
                    print("ERROR: There was an issue saving the model!")
                    raise

        except:
            print("ERROR: There was an issue during training!")
            raise
Example #25
 def __init__(self, num_threads):
     self.tasks = Queue(num_threads)
     for _ in range(num_threads):
         Worker(self.tasks)
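
This has the shape of the classic thread-pool recipe, where each Worker is a daemon thread that starts itself and consumes callables from the shared queue. The original Worker class is not shown; the version below is a sketch of that common pattern, assuming tasks holds (func, args, kwargs) tuples.

import threading

class Worker(threading.Thread):
    # Sketch of the usual thread-pool worker implied by the constructor above;
    # an assumption, since the real class is not part of the example.
    def __init__(self, tasks):
        super().__init__(daemon=True)
        self.tasks = tasks
        self.start()  # the pool only constructs workers, so each one starts itself

    def run(self):
        while True:
            func, args, kwargs = self.tasks.get()
            try:
                func(*args, **kwargs)
            except Exception as exc:
                print(exc)  # keep the worker alive even if a task fails
            finally:
                self.tasks.task_done()
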
Example #26
 def __init__(self):
     self._crewId = None
     self._worker1 = Worker()
     self._worker2 = Worker()
     self._worker3 = Worker()           
Example #27
weight_path = "model/a3c-{}-weight".format(env_name.lower())
load_weights = True
max_episode_each_worker = 50000

per_global_episode_play = 25
num_episode_play = 5

gamma = 0.99			# Future reward discount rate.
alpha_actor = 0.00001	# Learning rate for actors.
alpha_critic = 0.0001	# Learning rate for critics.

sess = tf.Session()

with sess.as_default(), sess.graph.as_default():
	# Create main agent.
	MainAgent = Worker("Main", env_name, gamma, alpha_actor, alpha_critic, sess)

	# Create workers.
	Workers = [
		Worker("Worker-{}".format(w_id), env_name, gamma, alpha_actor, alpha_critic, sess)
		for w_id in range(1, num_workers+1)
	]

	global_episode = tf.Variable(0.0, dtype=tf.float32, name="global_episode", trainable=False)
	global_episode_increment_name = global_episode.assign_add(1.0, name="global_episode_increment").name
	coord = tf.train.Coordinator()

	sess.run(tf.global_variables_initializer())

	# Only save main weights to disk.
	# Workers' initial weights are already copied from main network.
Example #28
 def setUp(self):
     self.worker = Worker(conf_file)
     self.config = self.worker.config
     self.tasks = self.config.sections()
     self.last_tasks = {}
Example #29
def create_new_worker():
    create_new_worker.num_of_created += 1
    new_worker = Worker(name="worker_" + str(create_new_worker.num_of_created))
    return Process(target=new_worker.run), "worker_" + str(create_new_worker.num_of_created)
Example #30
    if ENV_NAME == "Breakout-v0":
        image_crop = [31, 195]
    elif ENV_NAME == "MsPacman-v0":
        image_crop = [0, -50]
    else:
        image_crop = [0, -0]

    print("Starting to create {} workers...".format(num_workers))
    workers = []
    for i in range(num_workers):
        color = colours[i % len(colours)]
        worker_id = "worker_{}".format(i + 1)
        image_processor = GameImageProcessor(image_crop, frame_dimensions)

        worker = Worker(worker_id, ENV_NAME, network, n_steps, gamma, color,
                        grad_queue, rewards_queue, image_processor)
        workers.append(worker)

    threads = []
    for worker in workers:
        t = mp.spawn(run,
                     args=(worker, ),
                     nprocs=1,
                     join=False,
                     daemon=False,
                     start_method='spawn')
        threads.append(t)

    grads = None
    grad_count = 0
    batch_size = 64