コード例 #1
0
    def __init__(self):
        """Connect this worker node to the distributed task manager.

        Registers the shared-queue accessors, connects to the manager
        server, grabs the task/result queue proxies, and builds the
        downloader/parser helpers.
        """
        # Step 1: register the names of the shared-queue getters
        # (client side: names only, no callables).
        BaseManager.register('get_task_queue')
        BaseManager.register('get_result_queue')

        # Step 2: connect to the server. Port and authkey must match the
        # service process exactly.
        # NOTE(review): connecting to 0.0.0.0 is unusual — confirm the
        # intended server address.
        server_addr = '0.0.0.0'
        print('Connect to server %s...' % server_addr)
        self.m = BaseManager(address=(server_addr, 8001),
                             authkey='baike'.encode('utf-8'))
        self.m.connect()

        # Step 3: obtain the queue proxies over the network.
        self.task = self.m.get_task_queue()
        self.result = self.m.get_result_queue()

        # Build the page downloader and the parser; the parser config is
        # fetched from the remote config service on port 2007.
        self.downloader = HtmlDownloader()
        remote_cfg = ReadRemoteCfg(server_addr, 2007).get_cfg()
        print(remote_cfg)
        self.parser = HtmlParser(remote_cfg)
        print('init finish')
コード例 #2
0
ファイル: SpiderWork.py プロジェクト: yaodalu/crawler
    def __init__(self):
        """Initialize this worker node's connection to the distributed manager."""
        # Step 1: register the names used to fetch the shared queues.
        BaseManager.register('get_task_queue')
        BaseManager.register('get_result_queue')

        # Step 2: connect to the server.
        server_addr = '127.0.0.1'
        print('Connect to server %s...' % server_addr)

        # Port and authkey must match the service process exactly.
        # BUG FIX: authkey must be bytes — a str authkey raises TypeError
        # on Python 3 (the original also used the py2 `print` statement).
        self.m = BaseManager(address=(server_addr, 8001), authkey=b'baike')

        # Connect over the network.
        self.m.connect()

        # Step 3: obtain the queue proxies.
        self.task = self.m.get_task_queue()
        self.result = self.m.get_result_queue()

        # Build the page downloader and parser.
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        print('init finish')
コード例 #3
0
def integrate_save_score_table_parallel(cluster_score_l,
                                        label_l,
                                        config,
                                        save=False):
    """Merge per-cluster score tables in parallel into one flat score array.

    Args:
        cluster_score_l: per-cluster score matrices; index 0 provides the
            row count of the final table.
        label_l: labels forwarded to IntegrateScoreTable.
        config: dict with 'n_item' and (when save=True)
            'program_train_para_dir'.
        save: when True, also write total_score_table.npy to disk.

    Returns:
        (score_table, intermediate): float32 numpy array and a timing dict.
    """
    start_time = time.time()

    manager = BaseManager()
    # Must register before start(), otherwise the registration is ineffective.
    manager.register('IntegrateScoreTable', IntegrateScoreTable)
    manager.start()

    # Use ~90% of the cores, but never fewer than one worker.
    # BUG FIX: the original `cpu_count() // 10 * 9` evaluates to 0 on
    # machines with fewer than 10 cores, which crashes Pool(0).
    n_workers = max(1, multiprocessing.cpu_count() // 10 * 9)
    parallel_obj = manager.IntegrateScoreTable(
        cluster_score_l, label_l, config['n_item'], n_workers)
    res_l = []
    pool = multiprocessing.Pool(n_workers)
    for i in range(n_workers):
        res = pool.apply_async(score_table_parallel, args=(parallel_obj, i))
        res_l.append(res)
    pool.close()
    pool.join()

    # Stitch the partial {row_idx: score} results into one flat table.
    score_table = [0] * cluster_score_l[0].shape[0]
    for tmp_res in res_l:
        tmp_res = tmp_res.get()
        for idx in tmp_res:
            score_table[idx] = tmp_res[idx]

    score_table = np.array(score_table, dtype=np.float32)

    if save:
        total_score_table_dir = '%s/total_score_table.npy' % config[
            'program_train_para_dir']
        dir_io.save_numpy(total_score_table_dir, score_table)

    # Shut the manager down so its server process does not linger.
    manager.shutdown()

    end_time = time.time()
    intermediate = {'time': end_time - start_time}
    print('save score table success')
    return score_table, intermediate
コード例 #4
0
ファイル: worker.py プロジェクト: qiyeboy/DoubanRobot
def worker():
    # load 'session.txt', or call login() to generate it
    try:
        with open('session.txt', 'rb') as f:
            headers = cPickle.load(f)
            cookies = cPickle.load(f)
    except:
        print '[-] 无session.txt文件, 调用login()...'
        session = DoubanLogin().login()
        headers = session.headers
        cookies = session.cookies

    # connect to manager
    BaseManager.register('get_task_queue')
    BaseManager.register('get_result_queue')
    print 'Connect to server %s:5000...' % server_addr
    worker = BaseManager(address=(SERVER_ADDR, PORT), authkey='douban')
    worker.connect()
    task = worker.get_task_queue()
    result = worker.get_result_queue()

    # start listening
    print '[-] Waiting...'
    while True:
        try:
            id_ = task.get()
            print '[~] Running task...'
            info = get_user_info.get_info(id_, headers=headers, cookies=cookies)
            print '[+] Information returned.\n'
            result.put(info)
            print '[-] Waiting...'
            time.sleep(DELAY_TIME)

        except Exception, e:
            print e
            exit()
コード例 #5
0
def test():
    """Drive the demo: start a manager, publish random tasks, wait for the
    result queue to fill, then print every result.
    """
    # On Windows the registered callables cannot be lambdas, so the
    # module-level functions gettask/getresult are bound here.
    BaseManager.register('get_task', callable=gettask)
    BaseManager.register('get_result', callable=getresult)
    # Bind address and authkey; Windows needs an explicit IP, on Linux an
    # empty address defaults to the local host.
    manager = BaseManager(address=('127.0.0.1', 5000), authkey=b'123')
    manager.start()
    try:
        # Fetch both queues through the manager proxies.
        task = manager.get_task()
        result = manager.get_result()

        # Publish the tasks.
        for _ in range(task_number):
            n = random.randint(0, 10000)
            print('Put task %d...' % n)
            task.put(n)

        # Poll once per second until every task has been processed.
        while not result.full():
            time.sleep(1)

        for _ in range(result.qsize()):
            r = result.get(timeout=10)
            print('Result: %s' % r)

        print('all over')

    except Exception as e:
        print('Manager error:', e)
    finally:
        # Always shut down, otherwise a broken-pipe error is raised.
        time.sleep(1)
        manager.shutdown()
コード例 #6
0
ファイル: chapter20.py プロジェクト: soon-tw/whole-Python
    def start(self):
        """Start the manager server, publish ten tasks, and print ten results."""
        # Register the queue accessors (must happen before start()).
        BaseManager.register("get_task_queue", callable=self.get_task_que)
        BaseManager.register("get_result_queue", callable=self.get_result_que)

        # Listen on all interfaces, port 5000.
        manager = BaseManager(address=('0.0.0.0', 5000), authkey=b'abc')
        manager.start()

        # The queues must be fetched through the manager proxies, never
        # accessed directly.
        task_queue = manager.get_task_queue()
        result_queue = manager.get_result_queue()

        # Enqueue ten tasks.
        # BUG FIX: str(i) is required — the original '任务' + i raised
        # TypeError (cannot concatenate str and int).
        for i in range(10):
            r = '任务' + str(i)
            task_queue.put(r)

        # Drain ten results.
        for i in range(10):
            res = result_queue.get()
            print('res is {}'.format(res))

        manager.shutdown()
コード例 #7
0
  def __init__(self):
    """Build the A3C-style master: env, optimizer, and a manager-hosted
    global model shared with worker processes through proxies.
    """
    # Local imports: TF/Keras are only pulled in when the master is built.
    import tensorflow as tf
    from tensorflow.python import keras
    from tensorflow.python.keras import layers, Model
    from tensorflow.python.keras.layers import Dense, Flatten,Activation
    import keras.backend as K
    tf.enable_eager_execution()
    K.set_session(tf.Session())
    self.game_name = 'bombermandiehard-v0'
    save_dir = args.save_dir
    self.save_dir = save_dir
    if not os.path.exists(save_dir):
      os.makedirs(save_dir)

    env = gym.make(self.game_name)
    # Flattened 2-D observation: rows * cols.
    self.state_size = env.observation_space.shape[0]*env.observation_space.shape[1]
    print(self.state_size)
    self.action_size = env.action_space.n
    # use_locking: the shared optimizer is updated from multiple workers.
    self.opt = tf.train.AdamOptimizer(args.lr, use_locking=True)
    print(self.state_size, self.action_size)
    # Register the model classes before start() so the manager can proxy them.
    BaseManager.register('Model', Model)
    BaseManager.register('ActorCriticModelHolder', ActorCriticModelHolder)

    BaseManager.register('Dense', Dense)
    BaseManager.register('Flatten', Flatten)
    BaseManager.register('Activation', Activation)
    self.manager = BaseManager()
    self.manager.start()
    # The global model lives inside the manager's server process.
    self.global_model = self.manager.ActorCriticModelHolder()

    self.global_model.initialize(self.state_size, self.action_size) # global network
    print("After creation")
    print(self.global_model,flush=True)
    # Smoke-test a forward pass on the initial observation.
    # NOTE(review): assumes generate_state returns an array with .shape — confirm.
    initial= generate_state(env.reset(),WINDOW_LENGTH)
    print(("initial.shape",initial.shape))
    print(self.global_model.call(tf.convert_to_tensor([initial],dtype=tf.float32)))
コード例 #8
0
def plot(rewards):
    """Render the reward curve and save it to 'td3_multi_thre.png'."""
    clear_output(True)
    figure = plt.figure(figsize=(20, 5))
    plt.plot(rewards)
    plt.savefig('td3_multi_thre.png')
    # plt.show()


if __name__ == '__main__':
    # The replay buffer is hosted inside a BaseManager server process so
    # all actor/learner processes share one buffer through proxies.
    replay_buffer_size = 1e6
    # replay_buffer = ReplayBuffer(replay_buffer_size)

    # the replay buffer is a class, have to use torch manager to make it a proxy for sharing across processes
    BaseManager.register('ReplayBuffer', ReplayBuffer)
    manager = BaseManager()
    manager.start()
    replay_buffer = manager.ReplayBuffer(
        replay_buffer_size)  # share the replay buffer through manager

    # hyper-parameters for RL training
    max_episodes = 5000
    max_steps = 100
    batch_size = 256
    explore_steps = 0  # for random action sampling in the beginning of training
    update_itr = 1
    explore_noise_scale = 1.0
    eval_noise_scale = 0.5
    reward_scale = 1.0
    action_range = 4.
    state_dim = 91
コード例 #9
0
    def run(self):
        """Execute all selected test instances, retrying failed ones.

        Hosts a pair of LIFO queues in a manager process (build pipeline
        and completed-instance queue), sizes the job pool from options or
        CPU count, then loops: execute, drain completed instances, and
        retry up to `retry_failed` additional iterations.
        """
        retries = self.options.retry_failed + 1
        completed = 0

        # Queues live in the manager's server process so worker processes
        # can share them.
        BaseManager.register('LifoQueue', queue.LifoQueue)
        manager = BaseManager()
        manager.start()

        self.results = ExecutionCounter(total=len(self.instances))
        pipeline = manager.LifoQueue()
        done_queue = manager.LifoQueue()

        # Set number of jobs
        if self.options.jobs:
            self.jobs = self.options.jobs
        elif self.options.build_only:
            self.jobs = multiprocessing.cpu_count() * 2
        else:
            self.jobs = multiprocessing.cpu_count()
        logger.info("JOBS: %d" % self.jobs)

        self.update_counting()

        logger.info(
            "%d test scenarios (%d configurations) selected, %d configurations discarded due to filters."
            % (len(self.suites), len(
                self.instances), self.results.skipped_configs))

        while True:
            completed += 1

            # On retry iterations, reset the counters so only the retried
            # configurations are re-counted.
            if completed > 1:
                logger.info("%d Iteration:" % (completed))
                time.sleep(self.options.retry_interval
                           )  # waiting for the system to settle down
                self.results.done = self.results.total - self.results.failed
                if self.options.retry_build_errors:
                    self.results.failed = 0
                    self.results.error = 0
                else:
                    self.results.failed = self.results.error

            self.execute(pipeline, done_queue)

            # Drain completed instances and merge their metrics back.
            while True:
                try:
                    inst = done_queue.get_nowait()
                except queue.Empty:
                    break
                else:
                    inst.metrics.update(self.instances[inst.name].metrics)
                    inst.metrics["handler_time"] = inst.execution_time
                    inst.metrics["unrecognized"] = []
                    self.instances[inst.name] = inst

            print("")

            retries = retries - 1
            # There are cases where failed == error (only build failures),
            # we do not try build failures.
            if retries == 0 or (self.results.failed == self.results.error
                                and not self.options.retry_build_errors):
                break
コード例 #10
0
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 22:21:06 2020

@author: Sina
"""

from multiprocessing.managers import BaseManager, SyncManager
import random
import time
from threading import Thread

# Client side: only the name is registered; the callable lives in the
# server process that exported 'queue'.
BaseManager.register('queue')

# Address and authkey must match the serving manager process.
manager = BaseManager(address=('localhost', 50020), authkey=b'mymanager')
manager.connect()


def producer(i):
    """Push a fixed batch of demo values onto the shared queue, sleeping a
    random (per-call) delay between puts.

    NOTE(review): the parameter ``i`` is immediately clobbered by the loop
    index in the original; it is kept only for interface compatibility.
    """
    delay = random.randint(1, 4)

    data = [1, 3, 5, 7, 8, 3, 5, 7]

    for idx, item in enumerate(data):
        # A fresh proxy to the same remote queue on every iteration.
        q = manager.queue()
        print('P - {} Produced {}'.format(idx, item))
        q.put(item)
        time.sleep(delay)
コード例 #11
0
def get_manager():
    """Create, start, and return a BaseManager server instance."""
    mgr = BaseManager()
    mgr.start()
    return mgr
コード例 #12
0
def start_session(ip, port, passwd):
    """Connect to a remote manager at (ip, port) and return the client."""
    client = BaseManager(address=(ip, port), authkey=passwd)
    client.connect()
    return client
コード例 #13
0
from multiprocessing.managers import BaseManager

# Client stub: connect to a manager already serving on 127.0.0.1:5000.
# BUG FIX: authkey must be bytes — a str authkey raises TypeError on
# Python 3 during connection authentication.
m = BaseManager(address=('127.0.0.1', 5000), authkey=b'abcd')
server = m.connect()  # connect() returns None; assignment kept for compatibility
コード例 #14
0
ファイル: 1-server.py プロジェクト: winghou/spiders
result_queue = Queue()  # result queue (shared with workers via the manager)


def return_task():
    """Return the module-level task queue (exported through the manager)."""
    return task_queue


def return_result():
    """Return the module-level result queue (exported through the manager)."""
    return result_queue


if __name__ == '__main__':
    multiprocessing.freeze_support()
    BaseManager.register("get_task", callable=return_task)
    BaseManager.register("get_result", callable=return_result)
    manger = BaseManager(address=("127.0.0.1", 8888),
                         authkey="123456".encode())
    manger.start()
    task, result = manger.get_task(), manger.get_result()

    file = open("info.txt", "wb")

    url = "https://m.weibo.cn/api/container/getSecond?containerid=1005055303733126_-_FANS"
    gettaskapi("18879091455", "asdw09065991", url)

    print("等待数据处理:----------------------")

    while True:
        try:
            reslist = result.get(timeout=50)
            print("得到数据:", reslist[0])
            task.put(reslist[1])
コード例 #15
0
def startup():
    """Start PlexConnect: load settings, set up logging, host ATVSettings
    in a manager process, then launch the DNS/Web server child processes.

    Returns:
        bool: True when all enabled services came up; False after cleanup
        (cmdShutdown()/shutdown()) when any child failed to start.
    """
    global procs
    global pipes
    global param
    global running
    
    # Settings
    cfg = Settings.CSettings()
    param['CSettings'] = cfg
    
    # Logfile
    if cfg.getSetting('logpath').startswith('.'):
        # relative to current path
        logpath = sys.path[0] + sep + cfg.getSetting('logpath')
    else:
        # absolute path
        logpath = cfg.getSetting('logpath')
    
    param['LogFile'] = logpath + sep + 'PlexConnect.log'
    param['LogLevel'] = cfg.getSetting('loglevel')
    dinit('PlexConnect', param, True)  # init logging, new file, main process
    
    dprint('PlexConnect', 0, "Version: {0}", __VERSION__)
    dprint('PlexConnect', 0, "Python: {0}", sys.version)
    dprint('PlexConnect', 0, "Host OS: {0}", sys.platform)
    dprint('PlexConnect', 0, "PILBackgrounds: Is PIL installed? {0}", isPILinstalled())
    
    # more Settings
    param['IP_self'] = getIP_self()
    param['IP_outside'] = getIP_outside()
    param['HostToIntercept'] = cfg.getSetting('hosttointercept')
    param['baseURL'] = 'http://'+ param['HostToIntercept']
    
    # proxy for ATVSettings: hosted in a manager process so all child
    # processes share one settings object
    proxy = BaseManager()
    proxy.register('ATVSettings', ATVSettings.CATVSettings)
    proxy.start(initProxy)
    param['CATVSettings'] = proxy.ATVSettings()
    
    running = True
    
    # init DNSServer
    if cfg.getSetting('enable_dnsserver')=='True':
        master, slave = Pipe()  # endpoint [0]-PlexConnect, [1]-DNSServer
        proc = Process(target=DNSServer.Run, args=(slave, param))
        proc.start()
        
        # brief grace period before checking the child survived startup
        time.sleep(0.1)
        if proc.is_alive():
            procs['DNSServer'] = proc
            pipes['DNSServer'] = master
        else:
            dprint('PlexConnect', 0, "DNSServer not alive. Shutting down.")
            running = False
    
    # init WebServer
    if running:
        master, slave = Pipe()  # endpoint [0]-PlexConnect, [1]-WebServer
        proc = Process(target=WebServer.Run, args=(slave, param))
        proc.start()
        
        time.sleep(0.1)
        if proc.is_alive():
            procs['WebServer'] = proc
            pipes['WebServer'] = master
        else:
            dprint('PlexConnect', 0, "WebServer not alive. Shutting down.")
            running = False
    
    # init WebServer_SSL
    if running and \
       cfg.getSetting('enable_webserver_ssl')=='True':
        master, slave = Pipe()  # endpoint [0]-PlexConnect, [1]-WebServer
        proc = Process(target=WebServer.Run_SSL, args=(slave, param))
        proc.start()
        
        time.sleep(0.1)
        if proc.is_alive():
            procs['WebServer_SSL'] = proc
            pipes['WebServer_SSL'] = master
        else:
            dprint('PlexConnect', 0, "WebServer_SSL not alive. Shutting down.")
            running = False
    
    # not started successful - clean up
    if not running:
        cmdShutdown()
        shutdown()
    
    return running
コード例 #16
0
## Using persistentdb crashes the program; this routine drains the whole
## queue — the crash is probably caused by too many MySQL connections.
## Perhaps a thread may only use the connection pool created in that thread?

# mysql=Cloud_Music_MySQL_Pool()

# mysql=Cloud_Music_MySQL()
## A single shared mysql object cannot be used for all inserts — it raises
## a "mysql have gone away" error.

# Fetch the Queue over the network (client side: name-only registration).
BaseManager.register('get_song_info_queue')

# Connect to the server.
server_addr = '127.0.0.1'
print 'Connect to server %s ...' % server_addr

# NOTE(review): str authkey is fine on Python 2 (this snippet uses the
# py2 print statement) but would raise TypeError on Python 3.
manager = BaseManager(address=('127.0.0.1', 5000), authkey='music')
manager.connect()

# Get the shared queue proxy.
song_info_queue = manager.get_song_info_queue()


def run():
    count = 0
    multiple = 3
    while True:
        try:
            logging.info(u'wait 100 second try to get element from queue')
            d = song_info_queue.get(timeout=100)
            logging.info(u'get %s from queue' % d)
            # mysql=Cloud_Music_MySQL()
コード例 #17
0
ファイル: slave1.py プロジェクト: lvah/201901python
"""
文件名: $NAME.py
日期: 22  
作者: lvah
联系: [email protected]
代码描述: 



"""
import time
from multiprocessing.managers import BaseManager

# 1. 连接Master端, 获取共享的队列;ip是master端的ip, port'也是master端manager进程绑定的端口;
slave = BaseManager(address=('172.25.254.250', 4000), authkey=b'westos')

# 2. 注册队列, 获取共享的队列内容;
BaseManager.register('get_task_queue')
BaseManager.register('get_result_queue')

# 3. 连接master端;
slave.connect()

# 4. 通过网络访问共享的队列;
task = slave.get_task_queue()
result = slave.get_result_queue()

# 5. 读取管理端共享的任务, 并依次执行;
for i in range(500):
    n = task.get()
    print("slave1 运行任务 %d ** 2: " % (n))
コード例 #18
0
def get_logQueue(address = logQueueAddress, authkey = logQueueAuthKey):
        """Connect to the remote log manager and return the shared log-queue proxy.

        BUG FIX: 'get_logQueue' is never registered in this helper, so the
        proxy attribute only exists if some out-of-view code registered it
        on BaseManager. Registering on a private subclass makes the helper
        self-contained without mutating BaseManager globally.
        """
        class _LogQueueManager(BaseManager):
            pass
        _LogQueueManager.register('get_logQueue')
        m = _LogQueueManager(address=address, authkey=authkey)
        m.connect()
        logQueue = m.get_logQueue()
        return logQueue
コード例 #19
0
def convert_pac_data(file_list,
                     data_path,
                     processor,
                     threshold_dB,
                     magnification_dB,
                     load_wave=False):
    """Convert PAC acoustic-emission data files with multiprocessing.

    Args:
        file_list: file names used to detect an existing '<data_path>.txt' export.
        data_path: directory containing the raw data files.
        processor: number of worker processes.
        threshold_dB, magnification_dB: acquisition parameters forwarded to
            Preprocessing.
        load_wave: when True, also collect per-channel waveform data.

    Returns:
        (data_pri, obj, chan1, chan2, chan3, chan4) — feature array, the
        manager-hosted GlobalV proxy, and the four channel lists sorted by
        their last element.
    """
    # check existing file
    tar = data_path.split('/')[-1] + '.txt'
    if tar in file_list:
        print("=" * 46 + " Warning " + "=" * 45)
        while True:
            ans = input(
                "The exported data file has been detected. Do you want to overwrite it: (Enter 'yes' or 'no') "
            )
            if ans.strip() == 'yes':
                # NOTE(review): removes `tar` relative to the CWD — confirm
                # that is where the exported file actually lives.
                os.remove(tar)
                break
            elif ans.strip() == 'no':
                sys.exit(0)
            print("Please enter 'yes' or 'no' to continue!")

    file_list = os.listdir(data_path)
    each_core = int(math.ceil(len(file_list) / float(processor)))
    result, data_tra, tmp_all = [], [], []

    print("=" * 47 + " Start " + "=" * 46)
    start = time.time()

    manager = BaseManager()
    # Must register before start(), otherwise the registration is ineffective.
    manager.register('GlobalV', GlobalV)
    manager.start()
    obj = manager.GlobalV()

    # Multiprocessing acceleration: each worker takes a slice of the files.
    pool = multiprocessing.Pool(processes=processor)
    for idx, i in enumerate(range(0, len(file_list), each_core)):
        process = Preprocessing(idx, threshold_dB, magnification_dB, data_path,
                                processor)
        result.append(
            pool.apply_async(process.main, (
                file_list[i:i + each_core],
                obj,
                load_wave,
            )))

    pri = process.save_features(result)

    pool.close()
    pool.join()

    data_pri = np.array(
        [np.array(i.strip('\n').split(', ')).astype(np.float32) for i in pri])
    del file_list, pri
    # Split features by the channel id stored in column 2.
    chan_1 = data_pri[np.where(data_pri[:, 2] == 1)[0]]
    chan_2 = data_pri[np.where(data_pri[:, 2] == 2)[0]]
    chan_3 = data_pri[np.where(data_pri[:, 2] == 3)[0]]
    chan_4 = data_pri[np.where(data_pri[:, 2] == 4)[0]]

    end = time.time()

    print("=" * 46 + " Report " + "=" * 46)
    print("Calculation Info--Quantity of valid data: %s" % data_pri.shape[0])
    if load_wave:
        print(
            "Waveform Info--Channel 1: %d | Channel 2: %d | Channel 3: %d | Channel 4: %d"
            % (len(obj.get_1()), len(obj.get_2()), len(
                obj.get_3()), len(obj.get_4())))
    print(
        "Features Info--All channel: %d | Channel 1: %d | Channel 2: %d | Channel 3: %d | Channel 4: %d"
        % (data_pri.shape[0], chan_1.shape[0], chan_2.shape[0],
           chan_3.shape[0], chan_4.shape[0]))
    print("Finishing time: {}  |  Time consumption: {:.3f} min".format(
        time.asctime(time.localtime(time.time())), (end - start) / 60))
    # BUG FIX: list.sort() returns None, so the original returned four Nones
    # instead of the sorted channel lists; sorted() returns the list itself.
    return (data_pri, obj,
            sorted(obj.get_1(), key=lambda x: x[-1]),
            sorted(obj.get_2(), key=lambda x: x[-1]),
            sorted(obj.get_3(), key=lambda x: x[-1]),
            sorted(obj.get_4(), key=lambda x: x[-1]))
コード例 #20
0
def get_rdnsLookupDict(address = logQueueAddress, authkey = logQueueAuthKey):
        """Connect to the remote manager and return the shared rDNS-lookup dict proxy.

        BUG FIX: 'get_rdnsLookupDict' is never registered in this helper,
        so the proxy attribute only exists if out-of-view code registered
        it on BaseManager. A private subclass keeps the registration local.
        """
        class _RdnsManager(BaseManager):
            pass
        _RdnsManager.register('get_rdnsLookupDict')
        m = _RdnsManager(address=address, authkey=authkey)
        m.connect()
        rdnsLookupDict = m.get_rdnsLookupDict()
        return rdnsLookupDict
コード例 #21
0
ファイル: nodecontrol.py プロジェクト: RYUUK0/-
    def start_manager(self):
        """Create (but do not start) a manager exposing the URL/result queues."""
        # Register the queue getters before the manager instance is created.
        BaseManager.register('get_task_queue', callable=self.get_url_q)
        BaseManager.register('get_result_queue', callable=self.get_res_q)
        # Empty host = listen on all interfaces; authkey comes from settings.
        mgr = BaseManager(address=('', 8200),
                          authkey=st.MANAGER_AUTHKEY.encode())

        return mgr
コード例 #22
0
 def launch_data_server(self):
     """Start a manager server on port 50000 for sharing data with workers."""
     server = BaseManager(address=('', 50000), authkey=b'abc')
     self.ManagerServer = server
     self.ManagerServer.start()
コード例 #23
0
    def get_selector(selector_name, game, path, fixed=False, max=-1):
        """Build and return a manager-hosted level selector, or None.

        Args:
            selector_name: selector key, e.g. "ab-test", "random-all",
                "random-3", "seq-human-2", "pcg-random", "pcg-progressive";
                None returns None.
            game: game identifier forwarded to the selector constructor.
            path: directory for selector state (created if missing).
            fixed: only forwarded by the "pcg-random-<d>" branch.
            max: level cap (-1 = unlimited). NOTE(review): shadows the
                builtin `max`; kept for interface compatibility.

        Returns:
            A proxy to a selector living in the manager's server process,
            or None when selector_name is None.
        """
        # Register classes for sharing across procs
        for c in [
                RandomSelector, RandomWithDifSelector,
                SequentialHumanLevelSelector, RandomPCGSelector,
                RandomWithDifPCGSelector, ProgressivePCGSelector,
                SequentialSelector, ABTestSelector
        ]:
            BaseManager.register(c.__name__, c)
        manager = BaseManager()
        manager.start()

        # Determine selector
        # NOTE: branch order is load-bearing — exact names ("random-all",
        # "random-0123", "pcg-random") must be tested before the
        # startswith() prefix branches that would also match them.
        if selector_name is not None:
            make_path(path)
            path = os.path.realpath(path)
            if selector_name == "ab-test":
                selector = manager.ABTestSelector(path,
                                                  game,
                                                  "levels_2",
                                                  max=max)
            elif selector_name == "random-all":
                selector = manager.RandomSelector(path,
                                                  game, [0, 1, 2, 3, 4],
                                                  max=max)
            elif selector_name == "random-0123":
                selector = manager.RandomSelector(path,
                                                  game, [0, 1, 2, 3],
                                                  max=max)
            elif selector_name.startswith('random-'):
                # Suffix encodes difficulty in tenths, e.g. "random-3" -> 0.3
                difficulty = float(selector_name.split('random-')[1]) * 0.1
                selector = manager.RandomWithDifSelector(path,
                                                         game,
                                                         difficulty,
                                                         max=max)
            elif selector_name.startswith('seq-human-'):
                level_id = int(selector_name.split('seq-human-')[1])
                selector = manager.SequentialHumanLevelSelector(path,
                                                                game,
                                                                level_id,
                                                                max=max)
            elif selector_name.startswith('seq-'):
                difficulty = float(selector_name.split('seq-')[1]) * 0.1
                selector = manager.SequentialSelector(path,
                                                      game,
                                                      difficulty,
                                                      max=max)
            elif selector_name == "pcg-random":
                selector = manager.RandomPCGSelector(path, game, max=max)
            elif selector_name.startswith('pcg-random-'):
                difficulty = float(selector_name.split('pcg-random-')[1]) * 0.1
                selector = manager.RandomWithDifPCGSelector(path,
                                                            game,
                                                            difficulty,
                                                            fixed=fixed,
                                                            max=max)
            elif selector_name == "pcg-progressive":
                selector = manager.ProgressivePCGSelector(path, game, max=max)
            elif selector_name == "pcg-progressive-fixed":
                selector = manager.ProgressivePCGSelector(path,
                                                          game,
                                                          upper_limit=False,
                                                          max=max)
            else:
                raise Exception("Unknown level selector: + " + selector_name)
        else:
            return None

        return selector
コード例 #24
0
    def register_teacher(self, in_path=None, in_address=None):
        """Register one teacher model and assign the order number to it as 
           its id, with the file path (offline mode) or IP address (online 
           mode) that the teacher model wrote knowledge data to.

        Args:
            in_path (str|None): The input file path. Default None.
            in_address (str|None): The input IP address, in the format 
                "<IP address>:<IP port>" (e.g. "127.0.0.1:8080"). Default None.

        Raises:
            ValueError: if the student already started, or if neither/both
                of in_path and in_address are given.
        """
        if self._started:
            raise ValueError(
                "The student has been started and cannot register "
                "teacher no longer!")
        if in_path and in_address:
            raise ValueError("Input path and input address should not "
                             "be given at the same time!")
        if not in_path and not in_address:
            raise ValueError("One of input path and input address should "
                             "be given when registering teacher!")
        if in_address:
            # Online mode: connect to the teacher's manager and fetch the
            # four shared queues (knowledge, student<->teacher, command).
            if in_address in self._in_addresses:
                print("WARNING: the teacher with input address {} has been "
                      "registered, and ignored this time!".format(in_path))
                return
            ip, port = in_address.strip().split(":")
            BaseManager.register("get_knowledge_queue")
            BaseManager.register("get_s2t_queue")
            BaseManager.register("get_t2s_queue")
            BaseManager.register("get_cmd_queue")
            manager = BaseManager(address=(ip, int(port)),
                                  authkey=public_authkey.encode())

            # Wait for teacher model started to establish connection
            # (broad except: keep retrying until the manager is reachable).
            print("Connecting to {}, with public key {} ...".format(
                in_address, public_authkey))
            while True:
                try:
                    manager.connect()
                    break
                except:
                    time.sleep(1.0)

            knowledge_queue = manager.get_knowledge_queue()
            self._t2s_queues.append(manager.get_t2s_queue())
            self._s2t_queues.append(manager.get_s2t_queue())
            self._cmd_queues.append(manager.get_cmd_queue())
            self._in_addresses.append(in_address)
            self._in_paths.append(None)
            print("Registered teacher {} with input address {}.".format(
                self._num_teachers, in_address))
        else:
            # Offline mode: a daemon process replays knowledge data from
            # the file into a local queue, driven by command-queue signals.
            if in_path in self._in_paths:
                print("WARNING: th teacher with input path {} has been "
                      "registered, and ignored this time!".format(in_path))
                return

            def read_offline(in_path, cmd_queue, out_queue):
                # Helper run in a child process: replay the pickled
                # knowledge file into out_queue, obeying Sync/Start/End
                # signals arriving on cmd_queue.
                end_recved = False

                def get_cmd():
                    # Non-blocking poll of the command queue; an IOError
                    # (closed queue) is treated as an end signal.
                    cmd, end_recved = None, False
                    try:
                        if not cmd_queue.empty():
                            cmd = cmd_queue.get()
                            cmd_queue.task_done()
                            if isinstance(cmd, EndSignal):
                                end_recved = True
                    except IOError:
                        end_recved = True
                    return cmd, end_recved

                # wait for the sync in start
                while not end_recved:
                    cmd, end_recved = get_cmd()
                    if isinstance(cmd, SyncSignal):
                        out_queue.put(SyncSignal())
                        break
                # for multiple-times offline serving
                while not end_recved:
                    # wait for the sync in get_knowledge_desc()
                    while not end_recved:
                        cmd, end_recved = get_cmd()
                        if isinstance(cmd, SyncSignal):
                            out_queue.put(SyncSignal())
                            break

                    if end_recved:
                        break
                    with open(in_path, 'rb') as fin:
                        # get knowledge desc
                        desc = pickle.load(fin)
                        out_queue.put(desc)
                        # wait for the data accessing signal
                        while not end_recved:
                            cmd, end_recved = get_cmd()
                            if isinstance(cmd, StartSignal):
                                break
                        # get knowledge data
                        while not end_recved:
                            try:
                                data = pickle.load(fin)
                                out_queue.put(data)
                                _, end_recved = get_cmd()
                            except EOFError:
                                break
                    if end_recved:
                        break
                    out_queue.put(EndSignal())
                    # block until the consumer drained everything we sent
                    out_queue.join()

            knowledge_queue = Manager().Queue(100)
            cmd_queue = Manager().Queue(5)
            p = Process(target=read_offline,
                        args=(in_path, cmd_queue, knowledge_queue))
            p.daemon = True
            p.start()

            self._t2s_queues.append(None)
            self._s2t_queues.append(None)
            self._cmd_queues.append(cmd_queue)
            self._in_addresses.append(None)
            self._in_paths.append(in_path)
            print("Registered teacher {} with input path {}.".format(
                self._num_teachers, in_path))

        self._teacher_knowledge_queues.append(knowledge_queue)
        self._num_teachers += 1
コード例 #25
0
        process = multiprocessing.Process(target=game.agent_learn, args=(1, start_event_l1, end_event_l1))
        process.start()
        processes_learn.append(process)

        """
            Define Manager
        """
        # TODO, modify device label

        # Register the per-device queues exposed by the remote manager.
        BaseManager.register('get_actions_g5_queue')
        BaseManager.register('get_rewards_g5_queue')
        BaseManager.register('get_states_g5_queue')
        BaseManager.register('get_fap_g5_queue')
        BaseManager.register('get_finish_g5_queue')
        BaseManager.register('get_actions_glb_g5_queue')
        mgr = BaseManager(address=("172.16.68.1", 4444), authkey=b"game")

        mgr.connect()
        actions_g5 = mgr.get_actions_g5_queue()
        rewards_g5 = mgr.get_rewards_g5_queue()
        states_g5 = mgr.get_states_g5_queue()
        # BUG FIX: was mgr.get_states_g5_queue() — the states queue was
        # fetched twice and the registered fap queue was never used.
        fap_g5 = mgr.get_fap_g5_queue()
        finish_g5 = mgr.get_finish_g5_queue()
        actions_glb_g5 = mgr.get_actions_glb_g5_queue()

        for time_round in range(game.MAX):
            print("time: " + str(time_round))

            """
                Action Choosing part
            """
コード例 #26
0
"""Simple starter script for creating an RPC server with multiprocessing
"""
from multiprocessing.managers import BaseManager

PORT = 8000


def foo(num1: int, num2: int):
    """Return the sum of *num1* and *num2* (RPC demo endpoint)."""
    total = num2 + num1
    return total


if __name__ == "__main__":
    manager = BaseManager(('', PORT), b'password')
    manager.register('foo', foo)
    server = manager.get_server()
    print(f"server will now server on port {PORT}")
    server.serve_forever()
コード例 #27
0
ファイル: main.py プロジェクト: praveen-palanisamy/Ape-X-DQN

if __name__ == "__main__":
    params = json.load(open(args.params_file, 'r'))
    env_conf = params['env_conf']
    actor_params = params["Actor"]
    learner_params = params["Learner"]
    replay_params = params["Replay_Memory"]
    print(
        "Using the params:\n env_conf:{} \n actor_params:{} \n learner_params:{} \n, replay_params:{}"
        .format(env_conf, actor_params, learner_params, replay_params))

    mp_manager = mp.Manager()
    shared_state = mp_manager.dict()
    shared_mem = mp_manager.Queue()
    ReplayManager = BaseManager()
    ReplayManager.start()
    replay_mem = ReplayManager.Memory(replay_params["soft_capacity"],
                                      replay_params)

    # A learner is started before the Actors so that the shared_state is populated with a Q_state_dict
    learner = Learner(env_conf, learner_params, shared_state, replay_mem)
    learner_proc = mp.Process(target=learner.learn, args=(500000, ))
    learner_proc.start()

    #  TODO: Test with multiple actors
    actor_procs = []
    for i in range(actor_params["num_actors"]):
        actor_proc = Actor(i, env_conf, shared_state, shared_mem, actor_params)
        actor_proc.start()
        actor_procs.append(actor_proc)
コード例 #28
0
ファイル: client.py プロジェクト: FossenWang/pieces
from multiprocessing.managers import BaseManager

# Client: register the remote 'send_task' endpoint and call it three times.
mgr = BaseManager(address=('127.0.0.1', 5000), authkey=b'abc')
mgr.register('send_task')
mgr.connect()

for task_id in (1, 2, 3):
    print(mgr.send_task(task_id))
コード例 #29
0
ファイル: client.py プロジェクト: zjkfly/mutprocess_queue
def connect_s():
    """Connect to the local task-queue server and return the client manager."""
    BaseManager.register('task_queue')
    mgr = BaseManager(address=('127.0.0.1', 9999), authkey=b'zjk')
    mgr.connect()
    return mgr
コード例 #30
0
ファイル: Fund.py プロジェクト: bobchi/learn_py

def get_html(page_url):
    """Download *page_url* and return its text, decoded with the sniffed charset."""
    response = request.urlopen(page_url)
    raw = response.read()
    encoding = chardet.detect(raw)['encoding']  # sniff the page encoding
    return raw.decode(encoding, 'ignore')


def get_page_count(html):
    """Extract the total page count (digits only, as a string) from the pager."""
    soup = BeautifulSoup(html, 'html.parser')
    pager_text = soup.find('div', id='pager').find('span', 'nv').get_text()
    # Keep only the digits, e.g. '共49页' -> '49'.
    return ''.join(ch for ch in pager_text if ch.isdigit())


# Manager listening on all interfaces, port 8084.
bm = BaseManager(address=('', 8084), authkey=b'12345')
driver = None   # PhantomJS webdriver, set by init_spider()
all_page = 0    # total page count, set by init_spider()


# 初始化并获取页面总数
def init_spider():
    """Start the PhantomJS driver and read the total page count.

    BUG FIX: declares ``driver`` and ``all_page`` as globals — the original
    assigned to new locals, so the module-level variables were never
    updated and the function had no observable effect.
    """
    global driver, all_page
    driver = webdriver.PhantomJS()
    driver.get(page1_url)
    page_str = driver.find_element_by_id('pager').find_element_by_xpath(
        'span[@class="nv"]').text
    all_page = int(''.join(filter(str.isdigit, page_str)))


# 将页面数据存入本地txt文件
def get_html_raw(my_range):