Example #1
 def __init__(self,address='127.0.0.1',port=6666,authkey='',rootDomain='',digSubDomain=False,threads_num=1):
     
     BaseManager.__init__(self,(address,int(port)),authkey)
     task_queue_n=Queue.Queue()
     response_queue_n=Queue.Queue()
     
     # self.address=address
     self.port=port
     self.authkey=authkey
     self.register('task_queue_n',callable=lambda:task_queue_n)
     self.register('response_queue_n',callable=lambda:response_queue_n)
      
     # self.tasks=[]  # record of domains or paths this node has already visited
     
     self.digSubDomain=digSubDomain
     self.DnsThread=None 
     self.httpserverThread=None         
     self.START_FLAG=True     
     self.count=0   # counter of failed requests
     self.delay=0  # delay time
     
     self.threads_num=threads_num # number of threads for brute-forcing subdomains
     # both branches differed only in the isSubDomain flag, so pass it directly
     self.domain=domainRecorder(rootDomain=rootDomain,domain=rootDomain,path='/',isSubDomain=self.digSubDomain)
     
     self.domainCount=0
Example #2
 def __init__(self):
     self.config = configparser.ConfigParser()  # assumed initialization; the snippet called read() before creating a parser
     self.config.read('config.ini')
     BaseManager.register('Queue', queue.Queue)
     manager = BaseManager()
     manager.start()
     self.queue = manager.Queue()
     self.threads = {
         'sensorreading_submitter': {
             'last_run': None,
             'timeout': int(self.config.get('api', 'submit_sensorreadings_interval')),
             'thread': None
         },
         'desired_state_fetcher': {
             'last_run': None,
             'timeout': int(self.config.get('api', 'fetch_desired_states_interval')),
             'thread': None
         },
         'sensorreading_buffer': {
             'last_run': None,
             'timeout': int(self.config.get('api', 'sensorreading_buffer_flush_interval')),
             'thread': None
         },
         'maintenance': {
             'last_run': None,
             'timeout': int(self.config.get('api', 'maintenance_interval')),
             'thread': None
         }
     }
Example #3
 def run(self):
     while not self.is_stop:
         mgr = BaseManager(address=('', 7777), authkey=b'xxx')  # authkey must be bytes on Python 3
         s = mgr.get_server()
         s.serve_forever()  # blocks here until the server stops; the old loop counter below it was dead code
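If the calling thread must not block, a manager can instead be started in the background with start(); a minimal sketch under that assumption (the registered queue name is illustrative):

from multiprocessing.managers import BaseManager
import queue

task_queue = queue.Queue()

def get_task_queue():
    # named module-level function so it can be pickled by the spawned server
    return task_queue

if __name__ == '__main__':
    BaseManager.register('get_task_queue', callable=get_task_queue)
    mgr = BaseManager(address=('', 7777), authkey=b'xxx')
    mgr.start()       # spawns a server subprocess and returns immediately
    # ... do other work while the server runs in the background ...
    mgr.shutdown()    # stop the server process when done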
Example #4
def make_simple_server_manager(ip, port, authkey):
	print "starting server..."
	manager = BaseManager(address=(ip, port), authkey=authkey)  # was hard-coded to ('', 50000)/'abc', ignoring the arguments
	server = manager.get_server()
	server.serve_forever()  # blocks forever, so the two lines below were unreachable
	print "started"
	return manager
Example #5
 def create(self, name, **ctor_args):
     ''' Create a :class:`ConsoleServer` and return a proxy for it. 
     '''
     manager = BaseManager()
     manager.register('ConsoleServer', ConsoleServer)
     manager.start()
     return manager.ConsoleServer()
Example #6
    def __init__(self, address=None, authkey=None, serialize='pickle',
                 ctx=None):
        BaseManager.__init__(self, address, authkey, serialize, ctx)
        self.task_queue, self.result_queue = None, None

        # the callable can't be a lambda here: it fails to pickle when the server process is spawned
        self.register('get_task_queue', callable=return_task_queue)
        self.register('get_result_queue', callable=return_result_queue)
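For this registration to work, return_task_queue and return_result_queue must already exist as named module-level functions; a minimal sketch of what they presumably look like:

import queue

task_queue = queue.Queue()
result_queue = queue.Queue()

def return_task_queue():
    # a plain function instead of a lambda, so the registry survives pickling on Windows
    return task_queue

def return_result_queue():
    return result_queue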
Example #7
    def start(self):
        BaseManager.start(self)

        # fetch the task queue and result queue proxies
        if not self.task_queue:
            self.task_queue = self.get_task_queue()

        if not self.result_queue:
            self.result_queue = self.get_result_queue()
Example #8
    def _setup(self):
        free_phy_mem = available_memory(percent=35)
        maxsize = int(free_phy_mem / BLOCK_SIZE)
        self._input_buffer = self.manager.Queue(maxsize)
        self.output_buffer = self.manager.Queue(maxsize)

        BaseManager.register("ReoderBuffer", ReoderBuffer)
        bm = BaseManager()
        bm.start()
        self.reorder_buffer = bm.ReoderBuffer(self.output_buffer, 50)
Example #9
 def __init__(self):
     BaseManager.__init__(self,address=(Parameter.SERVER, Parameter.PORT), authkey=Parameter.PASSWORD)
     self._manager = Manager()
     self._coordinates = self._manager.Queue()
     self._values = self._manager.Queue()
     self._lock = self._manager.Lock()
     self._clientCounter = self._manager.Value('i', 0)
     self.register('clientStart', self.addClient)
     self.register('clientDone', self.removeClient)
     self.register('getCoordinatesLock', self.getCoordinatesLock)
     self.register('getCoordinatesQueue', self.getCoordinates)
     self.register('getValuesQueue', self.getValues)
Example #10
    def __init__(self, address, authkey):
        BaseManager.__init__(self, address, authkey)
        self.srvAddr = address
        self.srvKey = authkey

        AgentManager.register("get")
        AgentManager.register("load")
        AgentManager.register("readFile")
        AgentManager.register("writeFile")
        AgentManager.register("printmsg")

        self.localattrs = dict()
Example #11
 def __init__(self, host=None, port=None, authkey=None, delmult=1, replaceterm=None, replacewith=None, name='1', delinprod=True):
     self.name = name
     self.host = host
     self.port = port
     self.authkey = authkey
     self.delinprod = delinprod
     print 'Initializing LogParser: ' + self.name + ' as BaseManager(address=(' + host + ', ' + str(port) + '), authkey=' + authkey + ') with remote queues'
     BaseManager.register('get_log_queue')
     self.m = BaseManager(address=(host, port), authkey=authkey)
     self.m.connect()
     self.queue = self.m.get_log_queue()
     self.delmult = delmult
     self.replaceterm = replaceterm
     self.replacewith = replacewith
Example #12
    def connect(self, pythonexec=None, parent=None):
        "Custom connection method that will start up a new server"

        # fork a new server process with correct python interpreter (py3/venv)
        if pythonexec:
            # warning: this will not work frozen? (ie. py2exe)
            command = pythonexec + " -u %s --server" % __file__

            import wx
            
            class MyProcess(wx.Process):
                "Custom Process Class to handle OnTerminate event method"

                def OnTerminate(self, pid, status):
                    "Clean up on termination (prevent SEGV!)"
                
                def OnClose(self, evt):
                    "Termitate the process on exit"
                    # prevent the server from continuing to run after the IDE closes
                    print("closing pid", self.GetPid())
                    self.Kill(self.GetPid())
                    print("killed")


            self.process = MyProcess(parent)
            parent.Bind(wx.EVT_CLOSE, self.process.OnClose)
            #process.Redirect()
            flags = wx.EXEC_ASYNC
            if wx.Platform == '__WXMSW__':
                flags |= wx.EXEC_NOHIDE
            wx.Execute(command, flags, self.process)

            return BaseManager.connect(self)
Example #13
class QueueServer(object):

    def __init__(self, queue_server_host, queue_server_port, queue_server_authkey=None):
        '''
            host,port defines where your queuing server should be running while *authkey* 
            is going to be used to authenticate any communication between this queue server
            and clients connected to it. Clients will need to send the *authkey* to connect
            this queue server.
        '''
        self.host = queue_server_host
        self.port = queue_server_port
        self.authkey = queue_server_authkey

        '''
        Let's say we have a client that wants to put some image-related data into a database
        and also wants to generate thumbnails from it (you know where this is going;
        I'll give you a hint: check out my last post about multi-threading).
        '''
        database_queue = Queue()
        thumbnail_queue = Queue()
        
        '''now we have the queues, but since we want our clients to use them,
        we need to register them with BaseManager via a callable that clients
        can use to obtain a proxy object. Clients actually get a (proxy) object
        of each Queue, and to them it behaves like a regular queue. Internally,
        BaseManager proxies the data sharing between client and server, which is
        the fun part: we don't have to worry about locking, shared memory
        handling, etc., and for us it is like using a Queue between threads'''
        BaseManager.register('database_queue', callable=lambda:database_queue)
        BaseManager.register('thumbnail_queue', callable=lambda:thumbnail_queue)

        '''Now that we have registered our queues with BaseManager, we can instantiate
        the manager object and start the server. As mentioned, BaseManager will spawn a
        server in a subprocess and handle all the communication and data synchronization'''
        self.manager = BaseManager(address=(self.host, self.port), 
                                   authkey=self.authkey)
        
    def start(self):
        print 'Starting Server Process...'
        self.manager.start()
        
    def stop(self):
        self.manager.shutdown()
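A hypothetical way to run this server (host, port, and authkey below are placeholders); the matching client side is the queue-client class shown in Example #21:

if __name__ == '__main__':
    qs = QueueServer('127.0.0.1', 50000, queue_server_authkey='secret')
    qs.start()   # the manager server now runs in a subprocess
    # clients can connect, fetch database_queue()/thumbnail_queue() proxies,
    # and use them like ordinary queues
    qs.stop()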
Example #14
 def start(self, controller):
     self.controller = controller
     self.register('get_controller', self.get_controller)
     if not self.try_next_port:
         self.get_server().serve_forever()
     host, port = self.address
     while self.try_next_port:
         try:
             BaseManager.__init__(self, (host,port), self.authkey)
             self.get_server().serve_forever()
             self.try_next_port = False
         except socket.error as e:
             import errno
             if e.errno == errno.EADDRINUSE:
                 port += 1  # address in use: try the next port
             else:
                 raise
Example #15
    def __init__(self):
        BaseManager.register('get_task_queue')
        BaseManager.register('get_result_queue')

        server_addr = '127.0.0.1'
        print ('Connect to server %s...' % server_addr)

        self.m=BaseManager(address=(server_addr,8001),authkey='qiye'.encode('utf-8'))
        print('connecting...')
        self.m.connect()
        print('connected')

        self.task = self.m.get_task_queue()
        self.result = self.m.get_result_queue()

        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        print('spider init finish')
Example #16
class SpiderWork(object):
    def __init__(self):
        BaseManager.register('get_task_queue')
        BaseManager.register('get_result_queue')

        server_addr = '127.0.0.1'
        print ('Connect to server %s...' % server_addr)

        self.m=BaseManager(address=(server_addr,8001),authkey='qiye'.encode('utf-8'))
        print('connecting...')
        self.m.connect()
        print('connected')

        self.task = self.m.get_task_queue()
        self.result = self.m.get_result_queue()

        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        print('spider init finish')


    def crawl(self):
        while True:
            try:
                # print self.task
                if not self.task.empty():
                    url = self.task.get()

                    if url == 'end':
                        print ('stop...')
                        # notify the other nodes to stop
                        self.result.put({'new_urls':'end','data':'end'})
                        return
                    print ('spider is working on %s'%url)
                    content = self.downloader.download(url)
                    new_urls, data = self.parser.parser(url, content)
                    self.result.put({'new_urls':new_urls,'data':data})
            except EOFError as e:
                print('cannot connect to the other nodes')
                return
            except Exception as e:
                print(e)
                print('crawl fail')
Example #17
	def __init__(self):
		class QueueManager(BaseManager):
			pass
		# since this QueueManager only fetches Queues over the network, register by name only:
		BaseManager.register('get_task_queue')
		BaseManager.register('get_result_queue')
		# connect to the server, i.e. the machine running taskmanager.py:
		server_addr = '10.1.142.100'
		print('Connect to server %s...' % server_addr)
		# the port and authkey must match the settings in taskmanager.py exactly:
		m = BaseManager(address=(server_addr, 8888), authkey='abc')
		# connect over the network:
		m.connect()
		# get the Queue objects:
		self.task = m.get_task_queue()
		self.result = m.get_result_queue()
Example #18
class SlaveWork(object):

    def __init__(self):

        # initialize the worker node's connections for the distributed setup
        # step 1: register the names of the queue-getter methods with BaseManager
        BaseManager.register('get_task_queue')
        BaseManager.register('get_result_queue')

        ## step 2: connect to the server
        server_addr = '127.0.0.1'
        # the port and authkey must match the server process settings exactly:
        self.m = BaseManager(address=(server_addr, 8081), authkey='seven')
        # connect over the network:
        self.m.connect()

        ## step 3: get the queue proxies
        self.task = self.m.get_task_queue()
        self.result = self.m.get_result_queue()

        ## initialize the page downloader and parser
        self.downloader = HtmlDownloader()
        self.parser = HtmlParse()

    def crawl(self):
        while(True):
            try:
                if not self.task.empty():
                    url = self.task.get()
                    if url == 'end':
                        print("master node told this worker to stop")
                        self.result.put({'new_urls':'end','data':'end'})
                        return
                    print('worker node is parsing: %s' % url.encode('utf-8'))
                    content = self.downloader.download(url)
                    new_urls, data = self.parser.parser(url, content)
                    self.result.put({"new_urls": new_urls, "data": data})
            except EOFError:
                print("connection to the worker node failed")
                return
            except Exception:
                print('Crawl fail')
Example #19
 def __init__(self, host=None, port=None, authkey=None, baseurl=None, name='1', wq=None, rq=None, delinprod=True):
     self.name = name
     self.baseurl = baseurl
     self.delinprod = delinprod
     self.m = None
     if wq is None or rq is None:
         print 'Initializing RequestGenerator: ' + self.name + ' as BaseManager(address=(' + host + ', ' + str(port) + '), authkey=' + authkey + ') with remote queues'
         BaseManager.register('get_work_queue')
         BaseManager.register('get_result_queue')
         self.m = BaseManager(address=(host, port), authkey=authkey)
         self.m.connect()
         self.work_queue = self.m.get_work_queue()
         self.result_queue = self.m.get_result_queue()
     else:
         print 'Initializing RequestGenerator: ' + self.name + ' with shared local queues'
         self.work_queue = wq
         self.result_queue = rq
     #self.work_queue.cancel_join_thread()
     #self.result_queue.cancel_join_thread()
     self.http = urllib3.PoolManager()
Example #20
    def __init__(self, address, authkey):
        BaseManager.__init__(self, address, authkey)
        self.agentData = address[0], address[1], authkey

        self.srvAddr = address
        self.srvKey = authkey

        # remoteTask is task object remotely instantiated in the agent
        self.remoteTask = None

        # toolbagTask is toolbag's local copy
        self.toolbagTask = None

        AgentManager.register("get")
        AgentManager.register("load")
        AgentManager.register("readFile")
        AgentManager.register("writeFile")
        AgentManager.register("printmsg")

        self.localattrs = dict()
Example #21
    def __init__(self, queue_name, server_host, server_port, server_authkey):
        '''clients need to know which queue on our server they want to use'''
        self.queue_name = queue_name

        '''as the names suggest, server_host, server_port and server_authkey
        are needed to connect to our queue server
        '''
        self.host = server_host
        self.port = server_port
        self.authkey = server_authkey
        
        '''Now comes an important step: we need to tell the BaseManager class about this
        queue so that when we connect to our server, we can get the proxy object of the queue.
        Notice that we are not passing any callable this time, since the queue lives on the server'''
        BaseManager.register(self.queue_name)
        
        '''lets create an instance of BaseManager class so we can connect to server'''
        self.manager = BaseManager(address=(self.host, self.port), 
                                   authkey=self.authkey)
        self.manager.connect()
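The snippet stops after connecting; since the typeid was registered by name, a client would presumably fetch the queue proxy via getattr, e.g. a hypothetical helper:

    def get_queue(self):
        '''return the proxy object for the registered queue; it can be used
        like a regular Queue (put/get) while BaseManager handles the RPC'''
        return getattr(self.manager, self.queue_name)()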
Example #22
class SharedManager(object):
    def __init__(self, addr, auth_key=''):
        self.manager = BaseManager(address=addr, authkey=auth_key)
        self.server = None
    
    def register(self, plugin, clazz, methods):
        self.manager.register(plugin, clazz, exposed=methods)
    
    def get_manager(self):
        return self.manager
    
    def get_server(self):
        return self.server
    
    def start(self):
        try:
            self.server = self.manager.get_server()
            self.server.serve_forever()
        except Exception:
            traceback.print_exc()
Example #23
 def __init__(self,thread_size=0,server_addr='127.0.0.1',server_port=6666,authkey=b'123456',lock=None,count=10):
     threading.Thread.__init__(self)  # initialize the Thread parent class
     BaseManager.__init__(self,address=(server_addr,server_port), authkey=authkey)
     
     self.visited=None    # set of already-visited URLs
     self.pages=None     # pages collected so far
     self.threads=[]  # holds each worker thread
     self.wait_queue=Queue.Queue()  # queue of pending tasks
     self.lock=lock # thread lock
     
     # flags used for task management
     
     self.start_flag = False
     self.is_running = False  
     self.finished_all = False
     self.dead_all = False
     
     self.Runable=True
     
     self.thread_size=thread_size  # number of worker threads
     self.task_num=self.thread_size*2     # size of the task queue
     
     
     '''
     initialize the server connection
     
     '''
     self.server_addr=server_addr
     self.server_port=server_port
     self.authkey=authkey
     self.register('task_queue_n')
     self.register('response_queue_n')
     self.__count=count # number of attempts to connect to the server
     
     print '[%s] [INFO] Connect to server %s...' %(self.__time(),server_addr)
     
     try:
         self.connect()
     except Exception,e:
         print "[%s] [ERROR] Connect to server error , please confirm ip,port and authkey ..."%(self.__time())
         exit(0)
Example #24
	def __init__(self):
		# queue for dispatching tasks:
		task_queue = Queue.Queue()
		# queue for receiving results:
		result_queue = Queue.Queue()
		class QueueManager(BaseManager):
		    pass
		# register both Queues on the network; the callable argument binds each Queue object:
		BaseManager.register('get_task_queue', callable=lambda: task_queue)
		BaseManager.register('get_result_queue', callable=lambda: result_queue)
		# bind port 8888 and set the authkey 'abc' (the original comment said port 5000, but the code binds 8888):
		manager = BaseManager(address=('',8888), authkey='abc')
		# start the manager:
		manager.start()
		# get the Queue objects that are accessible over the network:
		self.task = manager.get_task_queue()
		self.result = manager.get_result_queue()
Example #25
    def start(self):
        BaseManager.register('get_dispatched_job_queue')
        BaseManager.register('get_finished_job_queue')

        server = '127.0.0.1'
        print('Connect to server %s...' % server)
        manager = BaseManager(address=(server, 8888), authkey=b'jobs')  # authkey must be bytes on Python 3
        manager.connect()

        dispatched_jobs = manager.get_dispatched_job_queue()
        finished_jobs = manager.get_finished_job_queue()

        while True:
            job = dispatched_jobs.get(timeout=1)
            print('Run job: %s ' % job.job_id)
            time.sleep(1)
            finished_jobs.put(job)
Example #26
    def __init__(self):

        # initialize the worker node's connections for the distributed setup
        # step 1: register the names of the queue-getter methods with BaseManager
        BaseManager.register('get_task_queue')
        BaseManager.register('get_result_queue')

        ## step 2: connect to the server
        server_addr = '127.0.0.1'
        # the port and authkey must match the server process settings exactly:
        self.m = BaseManager(address=(server_addr, 8081), authkey='seven')
        # connect over the network:
        self.m.connect()

        ## step 3: get the queue proxies
        self.task = self.m.get_task_queue()
        self.result = self.m.get_result_queue()

        ## initialize the page downloader and parser
        self.downloader = HtmlDownloader()
        self.parser = HtmlParse()
Example #27
def main():
    BaseManager.register('run_rpc_function')
    manager = BaseManager(address=('127.0.0.1',51999), authkey='my_authkey')
    manager.connect()
    returned_proxy_object = manager.run_rpc_function('SOME LOUD TEXT')
    print returned_proxy_object
    print returned_proxy_object._getvalue() #might be a better way to get the value, probably need to specify my own proxy object, see BaseProxy
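For this client to work, a server must expose run_rpc_function at the same address and authkey; a minimal counterpart sketch (the function body is purely illustrative):

def run_rpc_function(text):
    # illustrative implementation; the real one lives wherever the server is defined
    return text.lower()

def serve():
    BaseManager.register('run_rpc_function', callable=run_rpc_function)
    manager = BaseManager(address=('127.0.0.1', 51999), authkey='my_authkey')
    manager.get_server().serve_forever()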
Example #28
 def StartManager(self, serveraddress="Localhost", port=80, key=None):
     if(self.queueManager == None):
         try:
             key = key.encode("utf-8")
         except:
             pass
         BaseManager.register('GetTaskQueue', callable=ProcessingQueueManager.ReturnTaskQueue)
         BaseManager.register('GetResultQueue', callable=ProcessingQueueManager.ReturnResultQueue)
         queueManager = BaseManager(address=(serveraddress, port), authkey=key)
         queueManager.start()
         self.queueManager = queueManager
Example #29
 def StartConnect(self, serveraddress="localhost", port=80, key=None):
     if(self.queueManager == None):
         try:
             key = key.encode("utf-8")
         except:
             pass
         BaseManager.register('GetTaskQueue')
         BaseManager.register('GetResultQueue')
         queueManager = BaseManager(address=(serveraddress, port), authkey=key)
         queueManager.connect()
         self.queueManager = queueManager
Example #30
 def i_images_processing(self, vs): 
     
     """
     Interface function to launch processing VHRS images :func:`i_vhrs` and satellite images :func:`i_img_sat` in multi-processing.
     
     :param vs: Boolean variable to launch processing because of interface checkbox -> 0 or 1.
 
         - 0 means, not texture processing
         - 1 means, launch texture processing
     :type vs: int
     """
     
     # Multiprocessing
     mgr = BaseManager()
     mgr.register('defaultdict', defaultdict, DictProxy)
     mgr.start()
     self.out_ndvistats_folder_tab = mgr.defaultdict(list)
     
     p_img_sat = Process(target=self.i_img_sat)
     p_img_sat.start()
     if self.mp == 0:
         p_img_sat.join()
     
     if vs == 1:
         p_vhrs = Process(target=self.i_vhrs)#, args=(vs, ))
         p_vhrs.start()
         p_vhrs.join()
     
     if self.mp == 1:
         p_img_sat.join()
     
     # List of output raster path
     self.raster_path.append(self.out_ndvistats_folder_tab[0])
     # List of output raster band
     self.list_band_outraster.append(1)
     
     if vs == 1:
         self.raster_path.append(self.out_ndvistats_folder_tab['sfs'])
         self.list_band_outraster.append(4)
         self.raster_path.append(self.out_ndvistats_folder_tab['haralick'])
         self.list_band_outraster.append(2)
     
     # To slope, to extract scree
     if self.path_mnt != '':
         self.raster_path.append(self.path_mnt)
         self.list_band_outraster.append(1)
         
     self.raster_path.append(self.out_ndvistats_folder_tab[1])
     # example raster path tab :
     #                [path_folder_dpt + '/' + folder_processing + '/' + classif_year + '/Min_2014.TIF',\
     #                os.path.dirname(path_ortho) + '/Clip_buffer_surface_dep_18_IRCOrtho65_2m_sfs.TIF',\
     #                os.path.dirname(path_ortho) + '/Clip_buffer_surface_dep_18_IRCOrtho65_2m_haralick.TIF',\
     #                path_folder_dpt + '/' + folder_processing + '/' + classif_year + '/Max_2014.TIF']
     
     # List of output raster band
     self.list_band_outraster.append(1) #[1, 4, 2, 1]
     
     print("End of images processing !")
Example #31
def startup():
    global procs
    global pipes
    global param
    global running

    # Settings
    cfg = Settings.CSettings(CONFIG_PATH)
    param['CSettings'] = cfg

    # Logfile
    if cfg.getSetting('logpath').startswith('.'):
        # relative to current path
        logpath = sys.path[0] + sep + cfg.getSetting('logpath')
    else:
        # absolute path
        logpath = cfg.getSetting('logpath')

    param['LogFile'] = logpath + sep + 'PlexConnect.log'
    param['LogLevel'] = cfg.getSetting('loglevel')
    dinit('PlexConnect', param, True)  # init logging, new file, main process

    dprint('PlexConnect', 0, "Version: {0}", __VERSION__)
    dprint('PlexConnect', 0, "Python: {0}", sys.version)
    dprint('PlexConnect', 0, "Host OS: {0}", sys.platform)
    dprint('PlexConnect', 0, "PILBackgrounds: Is PIL installed? {0}",
           isPILinstalled())

    # more Settings
    param['IP_self'] = getIP_self()
    param['HostToIntercept'] = cfg.getSetting('hosttointercept')
    param['baseURL'] = 'http://' + param['HostToIntercept']

    # proxy for ATVSettings
    proxy = BaseManager()
    proxy.register('ATVSettings', ATVSettings.CATVSettings)
    proxy.start(initProxy)
    param['CATVSettings'] = proxy.ATVSettings(CONFIG_PATH)

    running = True

    # init DNSServer
    if cfg.getSetting('enable_dnsserver') == 'True':
        master, slave = Pipe()  # endpoint [0]-PlexConnect, [1]-DNSServer
        proc = Process(target=DNSServer.Run, args=(slave, param))
        proc.start()

        time.sleep(0.1)
        if proc.is_alive():
            procs['DNSServer'] = proc
            pipes['DNSServer'] = master
        else:
            dprint('PlexConnect', 0, "DNSServer not alive. Shutting down.")
            running = False

    # init WebServer
    if running:
        master, slave = Pipe()  # endpoint [0]-PlexConnect, [1]-WebServer
        proc = Process(target=WebServer.Run, args=(slave, param))
        proc.start()

        time.sleep(0.1)
        if proc.is_alive():
            procs['WebServer'] = proc
            pipes['WebServer'] = master
        else:
            dprint('PlexConnect', 0, "WebServer not alive. Shutting down.")
            running = False

    # init WebServer_SSL
    if running and \
       cfg.getSetting('enable_webserver_ssl')=='True':
        master, slave = Pipe()  # endpoint [0]-PlexConnect, [1]-WebServer
        proc = Process(target=WebServer.Run_SSL, args=(slave, param))
        proc.start()

        time.sleep(0.1)
        if proc.is_alive():
            procs['WebServer_SSL'] = proc
            pipes['WebServer_SSL'] = master
        else:
            dprint('PlexConnect', 0, "WebServer_SSL not alive. Shutting down.")
            running = False

    # not started successfully - clean up
    if not running:
        cmdShutdown()
        shutdown()

    return running
Example #32
def test():
    # on Windows the registered callable cannot be a lambda, so define named functions first and bind them
    BaseManager.register('get_task',callable = gettask)
    BaseManager.register('get_result',callable = getresult)
    # bind the port and set the authkey; on Windows the IP address is required, on Linux it defaults to localhost
    manager = BaseManager(address = ('127.0.0.1',5002),authkey = b'123')
    # start the manager
    manager.start()
    try:
        # obtain the task and result queues over the network
        task = manager.get_task()
        result = manager.get_result()
        # add tasks
        for i in range(task_number):
            print('Put task %d...' % i)
            task.put(i)
        # check once per second whether all tasks have finished
        while not result.full():
            time.sleep(1)
        for i in range(result.qsize()):
            ans = result.get()
            print('task %d is finish , runtime:%d s' % ans)
    
    except:
        print('Manager error')
    finally:
        # always shut down, otherwise you get unclosed-pipe errors
        manager.shutdown()
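The named callables bound above are not shown in the snippet; given how task and result are used, gettask and getresult presumably return two module-level queues, e.g.:

import queue

task_queue = queue.Queue()
result_queue = queue.Queue(task_number)  # result.full() above only terminates if the queue is bounded

def gettask():
    return task_queue

def getresult():
    return result_queue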
Example #33
class Logger(object):
    def __init__(self):
        print("inited, pid:{}".format(os.getpid()))
        self.counter = 0
        self.pid = os.getpid()

    def increment(self):
        self.counter += 1
        print(self.counter)
        # print(id(self))
        # print(self.pid)
        print(os.getpid())
        print()

    def __del__(self):
        print("deleted, pid:{}".format(os.getpid()))


# logger = Logger()
BaseManager.register('Logger', Logger)
manager = BaseManager()
manager.start()
logger = manager.Logger()


def increment_counter(i):
    print(os.getpid())
    logger.increment()


Example #34
    def __init__(self):
        # Create Shared Variables
        BaseManager.register('CacheDataHandler', CacheDataHandler)
        BaseManager.register('ChannelYouTube', ChannelYouTube)
        BaseManager.register("ChannelTwitch", ChannelTwitch)
        BaseManager.register("Dict", dict)
        BaseManager.register("QueueHandler", QueueHandler)
        BaseManager.register("YouTubeAPIHandler", YouTubeAPIHandler)
        BaseManager.register("GlobalVariables", GlobalVariables)

        # Channel Class
        self.baseManagerChannelInfo = BaseManager()
        self.baseManagerChannelInfo.start()

        # Data Handler
        self.baseManagerNormalHandlers = BaseManager()
        self.baseManagerNormalHandlers.start()
        self.cachedDataHandler = self.baseManagerNormalHandlers.CacheDataHandler(
        )

        # Global Queue Holder.
        self.queue_holder = self.baseManagerNormalHandlers.QueueHandler()

        # YouTube API Handler
        self.baseManagerAPIHandlers = BaseManager()
        self.baseManagerAPIHandlers.start()
        self.youtube_api_handler = self.baseManagerAPIHandlers.YouTubeAPIHandler(
            self.cachedDataHandler)

        # Cookies
        self.baseManagerCookieDictHolder = BaseManager()
        self.baseManagerCookieDictHolder.start()
        cookie_handler = build_cookies()
        cookie_handler.load()
        cookies_ = cookie_handler.get_cookie_list()
        self.shared_cookieDictHolder = self.baseManagerCookieDictHolder.Dict(
            cookies_)  # type: dict

        # Global Variables
        self.baseManagerGlobalVariables = BaseManager()
        self.baseManagerGlobalVariables.start()
        self.shared_globalVariables = self.baseManagerGlobalVariables.GlobalVariables(
        )  # type: GlobalVariables
Example #35
class ProcessHandler:
    channels_dict = {}
    platforms = ['YOUTUBE', 'TWITCH']

    debug_mode = False
    serverPort = 31311
    enable_ffmpeg_logs = False

    # YouTube Queue Stuff
    YouTubeQueueThread = None

    def __init__(self):
        # Create Shared Variables
        BaseManager.register('CacheDataHandler', CacheDataHandler)
        BaseManager.register('ChannelYouTube', ChannelYouTube)
        BaseManager.register("ChannelTwitch", ChannelTwitch)
        BaseManager.register("Dict", dict)
        BaseManager.register("QueueHandler", QueueHandler)
        BaseManager.register("YouTubeAPIHandler", YouTubeAPIHandler)
        BaseManager.register("GlobalVariables", GlobalVariables)

        # Channel Class
        self.baseManagerChannelInfo = BaseManager()
        self.baseManagerChannelInfo.start()

        # Data Handler
        self.baseManagerNormalHandlers = BaseManager()
        self.baseManagerNormalHandlers.start()
        self.cachedDataHandler = self.baseManagerNormalHandlers.CacheDataHandler(
        )

        # Global Queue Holder.
        self.queue_holder = self.baseManagerNormalHandlers.QueueHandler()

        # YouTube API Handler
        self.baseManagerAPIHandlers = BaseManager()
        self.baseManagerAPIHandlers.start()
        self.youtube_api_handler = self.baseManagerAPIHandlers.YouTubeAPIHandler(
            self.cachedDataHandler)

        # Cookies
        self.baseManagerCookieDictHolder = BaseManager()
        self.baseManagerCookieDictHolder.start()
        cookie_handler = build_cookies()
        cookie_handler.load()
        cookies_ = cookie_handler.get_cookie_list()
        self.shared_cookieDictHolder = self.baseManagerCookieDictHolder.Dict(
            cookies_)  # type: dict

        # Global Variables
        self.baseManagerGlobalVariables = BaseManager()
        self.baseManagerGlobalVariables.start()
        self.shared_globalVariables = self.baseManagerGlobalVariables.GlobalVariables(
        )  # type: GlobalVariables

    def run_channel(self,
                    channel: Union[str, ChannelYouTube, ChannelTwitch],
                    platform='YOUTUBE',
                    startup=False,
                    **kwargs) -> List[Union[bool, str]]:
        if type(channel) is str:
            channel_holder_class = self.get_channel_class(channel, platform)
        else:
            channel_holder_class = channel

        if channel_holder_class:
            channel_holder_class.send_updated_setting_dict(kwargs)

            channel_identifier = channel_holder_class.get("channel_identifier")
            ok_bool, error_message = channel_holder_class.loadVideoData()
            if ok_bool:
                channel_holder_class.registerCloseEvent()

                channel_name = channel_holder_class.get("channel_name")
                check_streaming_channel_thread = Process(
                    target=channel_holder_class.channel_thread,
                    name="{0} - Channel Process".format(channel_name))
                check_streaming_channel_thread.start()
                self.channels_dict.update({
                    channel_identifier: {
                        'class': channel_holder_class,
                        'thread_class': check_streaming_channel_thread
                    }
                })
                return [True, "OK"]
            else:
                if startup:
                    self.channels_dict.update({
                        channel_identifier: {
                            'class': channel_holder_class,
                            'error': error_message,
                            'thread_class': None
                        }
                    })
                return [False, error_message]

    def get_channel_class(self,
                          channel_identifier,
                          platform='YOUTUBE') -> TemplateChannel:
        SettingDict = {
            'debug_mode': self.debug_mode,
            'ffmpeg_logs': self.enable_ffmpeg_logs
        }
        channel_holder_class = None
        if 'YOUTUBE' in platform.upper():
            channel_holder_class = self.baseManagerChannelInfo.ChannelYouTube(
                channel_identifier, SettingDict, self.shared_cookieDictHolder,
                self.cachedDataHandler, self.queue_holder,
                self.shared_globalVariables)
        if 'TWITCH' in platform.upper():
            channel_holder_class = self.baseManagerChannelInfo.ChannelTwitch(
                channel_identifier, SettingDict, self.shared_cookieDictHolder,
                self.cachedDataHandler, self.queue_holder,
                self.shared_globalVariables)
        return channel_holder_class

    def run_channel_video_id(self, video_id):
        """

        Runs a Channel Instance without a channel id. Uses a Video ID to get channel id etc

        """
        channel_holder_class = self.baseManagerChannelInfo.ChannelYouTube(
            None, {
                'debug_mode': self.debug_mode,
                'ffmpeg_logs': self.enable_ffmpeg_logs
            }, self.shared_cookieDictHolder, self.cachedDataHandler,
            self.queue_holder)
        ok_bool, error_message = channel_holder_class.loadVideoData(
            video_id=video_id)
        if ok_bool:
            channel_holder_class.registerCloseEvent()
            channel_id = channel_holder_class.get("channel_id")
            channel_name = channel_holder_class.get("channel_name")
            check_streaming_channel_thread = Process(
                target=channel_holder_class.channel_thread,
                name="{0} - Channel Process".format(channel_name))
            check_streaming_channel_thread.start()
            self.channels_dict.update({
                channel_id: {
                    'class': channel_holder_class,
                    'thread_class': check_streaming_channel_thread
                }
            })
            return [True, "OK"]
        else:
            return [False, error_message]

    def upload_test_run(self, channel_id):
        channel_holder_class = self.baseManagerChannelInfo.ChannelYouTube(
            channel_id, {
                'testUpload': True,
                'debug_mode': self.debug_mode,
                'ffmpeg_logs': self.enable_ffmpeg_logs
            }, self.shared_cookieDictHolder, self.cachedDataHandler,
            self.queue_holder)
        ok_bool, error_message = channel_holder_class.loadVideoData()
        if ok_bool:
            del ok_bool
            del error_message
            if not channel_holder_class.is_live():
                return [
                    False,
                    "Channel is not live streaming! The channel needs to be live streaming!"
                ]

            channel_holder_class.registerCloseEvent()
            channel_name = channel_holder_class.get("channel_name")
            check_streaming_channel_thread = Process(
                target=channel_holder_class.channel_thread,
                name="{0} - Channel Process".format(channel_name))
            check_streaming_channel_thread.start()
            self.channels_dict.update({
                channel_id: {
                    'class': channel_holder_class,
                    'thread_class': check_streaming_channel_thread
                }
            })
            return [True, "OK"]
        else:
            return [False, error_message]

    def loadChannels(self):
        channels = self.cachedDataHandler.getValue('channels')
        if channels:
            for platform in channels:
                channel_list = channels.get(platform)
                for channel_id in channel_list:
                    ok, error_message = self.run_channel(channel_id,
                                                         startup=True,
                                                         platform=platform)
                    if not ok:
                        warning(error_message)

    def run_youtube_queue(self):
        if self.cachedDataHandler.getValue('UploadLiveStreams'):
            self.YouTubeQueueThread = Process(target=runQueue,
                                              name="YouTube Upload Queue",
                                              args=(
                                                  self.youtube_api_handler,
                                                  self.queue_holder,
                                              ))
            self.YouTubeQueueThread.start()

    def run_server(self, cert=None, key=None):
        key = try_get(self.cachedDataHandler, lambda x: x.getValue('ssl_key'),
                      str) if not None else key
        cert = try_get(self.cachedDataHandler,
                       lambda x: x.getValue('ssl_cert'),
                       str) if not None else cert

        loadServer(self,
                   self.cachedDataHandler,
                   self.serverPort,
                   self.youtube_api_handler,
                   cert=cert,
                   key=key)

    def is_google_account_login_in(self):
        cj = self.shared_cookieDictHolder.copy()
        cookie = [cookies for cookies in cj if 'SSID' in cookies]
        return len(cookie) != 0  # the comprehension never yields None, so an emptiness check suffices
Example #36
class Counter:
    # NOTE: the original snippet was truncated; this class header and __init__
    # are reconstructed from the methods below, and the constructor signature
    # (total) is an assumption
    def __init__(self, total):
        self.left = total
        self.first_update = True
        self.tqdm_bar = tqdm(total=total,
                             dynamic_ncols=True,
                             unit="fr",
                             leave=True,
                             smoothing=0.01)

    def update(self, value):
        if self.first_update:
            self.tqdm_bar.reset(self.left)
            self.first_update = False
        self.tqdm_bar.update(value)

    def close(self):
        self.tqdm_bar.close()


BaseManager.register('Counter', Counter)


def process_pipe(pipe):
    encoder_history = deque(maxlen=20)
    while True:
        line = pipe.stdout.readline().strip()
        if len(line) == 0 and pipe.poll() is not None:
            break
        if len(line) == 0:
            continue
        if line:
            encoder_history.append(line)

    if pipe.returncode != 0 and pipe.returncode != -2:
        print(f"\nEncoder encountered an error: {pipe.returncode}")
Example #37
from pathlib import Path
from itertools import chain
from multiprocessing import Process, Manager
from multiprocessing.managers import BaseManager
from neuralnets.BiLSTM import BiLSTM
from util.preprocessing import loadDatasetPickle
from util.datasets import Dataset
from evaluator import Evaluator

# paths for input/output directories
models_dir, embeddings_dir = Path('models'), Path('embeddings')
pkl_dir, tables_dir = Path('pkl'), Path('tables')

BaseManager.register('Evaluator', Evaluator)
manager = BaseManager()
manager.start()

loaded_datasets = {}
evaluators = {
    transfer_setting: manager.Evaluator()
    for transfer_setting in ['pretrain_multi_task', 'pretrain_cross_lingual']
}


def eval_single_task(model_path, dataset_id, task, evaluators, embeddings,
                     mappings, data):
    # load the BiLSTM model
    model = BiLSTM.loadModel(model_path)

    # obtain the evaluator based on the transfer setting
    transfer_setting = model_path.parent.name
"""
    Daemon that manages a shared queue to push request data
    from mitmdump to chimp
"""

from multiprocessing.managers import BaseManager
import Queue

queue = Queue.Queue()
BaseManager.register('get_queue', callable=lambda:queue)
m = BaseManager(address=('', 50000), authkey="")
s = m.get_server()
s.serve_forever()
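A producer (e.g. a mitmdump script) would connect to this daemon and push request data through the shared queue; a hedged client sketch (the payload shape is illustrative):

from multiprocessing.managers import BaseManager

BaseManager.register('get_queue')
client = BaseManager(address=('127.0.0.1', 50000), authkey="")
client.connect()
client.get_queue().put({'url': 'http://example.com', 'method': 'GET'})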
Example #39
    def _start_manager(self):
        def get_knowledge_queue(idx):
            global knowledge_queues
            if idx < len(knowledge_queues):
                return knowledge_queues[idx]
            else:
                return None

        def get_s2t_queue():
            global s2t_queue
            return s2t_queue

        def get_t2s_queue():
            global t2s_queue
            return t2s_queue

        def get_cmd_queue():
            global cmd_queue
            return cmd_queue

        BaseManager.register(
            "get_knowledge_queue", callable=get_knowledge_queue)
        BaseManager.register("get_s2t_queue", callable=get_s2t_queue)
        BaseManager.register("get_t2s_queue", callable=get_t2s_queue)
        BaseManager.register("get_cmd_queue", callable=get_cmd_queue)
        manager = BaseManager(
            address=("", self._out_port), authkey=public_authkey.encode())
        manager.start()
        print("listen on address: {}".format(manager._address))
        print("public authkey: {}".format(public_authkey))
        return manager
Example #40
    def run(self):

        retries = self.options.retry_failed + 1
        completed = 0

        BaseManager.register('LifoQueue', queue.LifoQueue)
        manager = BaseManager()
        manager.start()

        self.results = ExecutionCounter(total=len(self.instances))
        pipeline = manager.LifoQueue()
        done_queue = manager.LifoQueue()

        # Set number of jobs
        if self.options.jobs:
            self.jobs = self.options.jobs
        elif self.options.build_only:
            self.jobs = multiprocessing.cpu_count() * 2
        else:
            self.jobs = multiprocessing.cpu_count()
        logger.info("JOBS: %d" % self.jobs)

        self.update_counting()

        logger.info(
            "%d test scenarios (%d configurations) selected, %d configurations discarded due to filters."
            % (len(self.suites), len(
                self.instances), self.results.skipped_configs))

        while True:
            completed += 1

            if completed > 1:
                logger.info("%d Iteration:" % (completed))
                time.sleep(self.options.retry_interval
                           )  # waiting for the system to settle down
                self.results.done = self.results.total - self.results.failed
                if self.options.retry_build_errors:
                    self.results.failed = 0
                    self.results.error = 0
                else:
                    self.results.failed = self.results.error

            self.execute(pipeline, done_queue)

            while True:
                try:
                    inst = done_queue.get_nowait()
                except queue.Empty:
                    break
                else:
                    inst.metrics.update(self.instances[inst.name].metrics)
                    inst.metrics["handler_time"] = inst.execution_time
                    inst.metrics["unrecognized"] = []
                    self.instances[inst.name] = inst

            print("")

            retries = retries - 1
            # There are cases where failed == error (only build failures),
            # we do not try build failures.
            if retries == 0 or (self.results.failed == self.results.error
                                and not self.options.retry_build_errors):
                break
Example #41
    def get_obj(self):
        # restored method signature: the snippet was truncated, and the call to
        # custom_obj.get_obj() below implies this accessor
        return self.nested_obj.val


class CustomProcess(Process):
    def __init__(self, obj, p, q, v):
        super(CustomProcess, self).__init__()
        self.obj = obj
        self.index = p, q
        self.v = v

    def run(self):
        self.obj.set_value(*self.index, self.v)


if __name__ == "__main__":
    BaseManager.register('CustomObj', CustomObj)
    manager = BaseManager()
    manager.start()
    data = [[0 for x in range(10)] for y in range(10)]
    matrix = np.matrix(data)
    custom_obj = manager.CustomObj(matrix)
    print(custom_obj.get_obj())
    process_list = []
    for p in range(10):
        for q in range(10):
            proc = CustomProcess(custom_obj, p, q, 10 * p + q)
            process_list.append(proc)
    for x in range(100):
        process_list[x].start()
    for x in range(100):
        process_list[x].join()
Example #42
    return s_names

def run(case, model, set_list, names, xlsx):
    model.learn(logger, set_list, names, 4, args.engine)
    model.log_data()
    case.add_accuracy(model.name, model.accuracy_score)
    case.add_aucs(model.name, model.roc_auc_score)
    case.add_tprs(model.name, model.tprs)
    xlsx.write(case.get_name(), model)
    print model.name
    print model.accuracy_score
    print model.roc_auc_score

if __name__ == "__main__":
    start_time = time.time()
    BaseManager.register('Logger', Logger)
    BaseManager.register('Case', Case)
    BaseManager.register('Xlsx', Xlsx)
    manager = BaseManager()
    manager.start()

    # parse arguments
    args = arg.parse(sys.argv[1:])

    # create logger
    logger = manager.Logger('%s.v%s' %(args.engine, args.version))
    
    # load dataset
    dataset = load_dataset(args.datafile.name)

    # create docx, xlsx for report
Example #43
    def start(self):
        # register the dispatched and finished task queues on the network
        BaseManager.register('get_dispatched_task_queue',
                             callable=self.get_dispatched_task_queue)
        BaseManager.register('get_finished_task_queue',
                             callable=self.get_finished_task_queue)

        task = MysqlHelper.excuteFindOne(
            "select * from tb_task where Fid={}".format(self.taskId))
        Fip = task["FserverIp"]
        Fport = task["Fport"]
        Fauthkey = task["Fauthkey"]
        # listen on the port and start the service; Fauthkey is an arbitrary authkey slaves use to authenticate when fetching tasks, FserverIp is the master's IP, and Fport is the port (usually 8888)
        manager = BaseManager(address=(Fip, Fport), authkey=Fauthkey)
        manager.start()

        # use the methods registered above to get the queues
        dispatched_tasks = manager.get_dispatched_task_queue()  # dispatch queue
        finished_tasks = manager.get_finished_task_queue()  # result queue

        while True:
            try:
                task_2 = MysqlHelper.excuteFindOne(
                    "select * from tb_task where Fid={}".format(self.taskId))
                num = int(task_2["Fnum"])
                url_list = self.url_list
                for index, url in enumerate(url_list):
                    print url
                    num += 1
                    dispatched_spider = self.spider
                    dispatched_spider.pageUrl = url
                    dispatched_tasks.put(dispatched_spider)
                    print "Dispatched task: " + str(index + 1)
                    MysqlHelper.excuteUpdate(self.table_name, {
                        "Fnum": str(num + 1),
                        "Fstate": 1
                    }, "Fid={}".format(self.taskId))
                    time.sleep(1)
                print "------------ finished one round of task dispatch ----------"
                # after finishing a round of dispatch, set the state to sleeping
                MysqlHelper.excuteUpdate(self.table_name, {"Fstate": 2},
                                         "Fid={}".format(self.taskId))
                # drain the execution results from the finished queue
                while not dispatched_tasks.empty():
                    # each result reports the number of pages crawled
                    result_spider = finished_tasks.get()
                    print "Task result received:"
                # pause before continuing with the next round of dispatch
                # time.sleep(int(task_2["FtimeInterval"]))
                manager.shutdown()
                manager.shutdown()
            except Exception as ex:
                MysqlHelper.excuteUpdate(self.table_name, {"Fstate": 0},
                                         "Fid={}".format(self.taskId))
                continue
Example #44
 def __init__(self, address=None, authkey=''.encode('UTF-8')):
     BaseManager.__init__(self, address, authkey)
Example #45
logger = logging.getLogger("TSGRain-Flask-Logger")
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh = logging.FileHandler(conf.TSGRAIN_FLASK_LOGFILE)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
# ch.setLevel(logging.ERROR)
ch.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)

if conf.IPC_FLAG:
    BaseManager.register('queue_StoC')
    BaseManager.register('queue_CtoS')
    m = BaseManager(address=('localhost', 50000), authkey=b'secret')
    try:
        m.connect()
    except Exception:
        logger.error("BaseManager connect(): error")

    queue_s_to_c = m.queue_StoC()
    queue_c_to_s = m.queue_CtoS()


@app.route('/')
@app.route('/index')
def index():
    logger.info('routes.py: /index')
Example #46
 def start_Manager(self, url_q, result_q):
     BaseManager.register('get_task_queue', callable=lambda: url_q)
     BaseManager.register('get_result_queue', callable=lambda: result_q)
     manager = BaseManager(address=('127.0.0.1', 8001), authkey=b'baike')
     return manager
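Note that start_Manager only constructs the manager; a caller still has to start it before the registered queues can be fetched. A hypothetical usage sketch:

 def run_manager_demo(self, url_q, result_q):
     # assumed usage of start_Manager: start the server process, then fetch the proxies
     manager = self.start_Manager(url_q, result_q)
     manager.start()
     task_q = manager.get_task_queue()
     result_queue = manager.get_result_queue()
     return manager, task_q, result_queue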
Example #47
    if prt: print('comments:', comments)

    result = data_to_dict(idd=av,
                          title=title, up=up,
                          rank=rank, summ=summ, danmaku=danmaku, coins=coins, collections=collections, shares=shares, comments=comments,
                          uploaded=uploaded, category=category)
    total += 1
    return result
#------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------
if __name__ == '__main__':

    clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    msg = clientsocket.connect_ex((server_IP, server_port))
    #msg = clientsocket.connect_ex(('127.0.0.1', 2020))
    BaseManager.register('getavq')

    if msg != 0:
        print('connection failed! msg=' + str(msg))
        exit()

    if avshower:
        m = BaseManager(address=('127.0.0.1', 3030), authkey=b'abc')
        m.connect()
        avq = m.getavq()
    else: avq = None

    conn = msghandler(sock=clientsocket, excpt=excptprcs, timeout=60)

    while True:  # socket: delivery is not guaranteed, ordering is not guaranteed
        listt = []
Example #48
    def start(self):
        # register the dispatched and finished job queues on the network
        BaseManager.register('get_dispatched_job_queue',
                             callable=self.get_dispatched_job_queue)
        BaseManager.register('get_finished_job_queue',
                             callable=self.get_finished_job_queue)

        # listen on the port and start the service
        manager = BaseManager(address=('127.0.0.1', 8888), authkey=b'jobs')
        manager.start()

        # use the methods registered above to get the queues
        dispatched_jobs = manager.get_dispatched_job_queue()
        finished_jobs = manager.get_finished_job_queue()

        # dispatch 10 jobs at a time; once all 10 have finished, dispatch 10 more
        job_id = 0
        while True:
            for i in range(0, 10):
                job_id = job_id + 1
                job = Job(job_id)
                print('Dispatch job: %s' % job.job_id)
                dispatched_jobs.put(job)

            while not dispatched_jobs.empty():
                job = finished_jobs.get(timeout=60)  # the bare positional 60 was passed as `block`, not as a timeout
                print('Finished Job: %s' % job.job_id)

        manager.shutdown()  # unreachable: the while True loop above never exits
Example #49
    def incrProgress(self, incr):
        self.iterationCounter += incr
        #if self.iterationCounter > self.maxIterations:
        #print "\nError: Iteration {0} > maxIterations {1}".format(self.iterationCounter, self.maxIterations)
        #else:
        #printProgress(self.iterationCounter, self.maxIterations)
        printProgress(self.iterationCounter, self.maxIterations)

    def resetProgress(self, printStatus=False):
        self.iterationCounter = 0
        if printStatus:
            printProgress(0, self.maxIterations)


BaseManager.register("sharedProgress", sharedProgress)
manager = BaseManager()
manager.start()
sharedProgressBar = manager.sharedProgress(n_ij_iterations)


def i_worker(tau, i_values):
    """worker function to calculate uncorrelated ii sum for given i values with fixed tau"""
    sigma_ii_sum = 0.
    for i in i_values:
        sigma_ii_sum += ij_t0_average(tau, i, i)

    return sigma_ii_sum


def ij_worker(tau, ij_dict):
Example #50
from multiprocessing.managers import BaseManager

# passing messages between processes

if __name__ == '__main__':
    BaseManager.register("task")
    conn = BaseManager(address=("localhost", 8080), authkey=b'abc')
    conn.connect()
    task = conn.task()
    while 1:
        task.put(2)
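For this client to find anything, a server must have exposed 'task' via a callable before the client connects; a minimal counterpart sketch:

from multiprocessing.managers import BaseManager
import queue

task = queue.Queue()

if __name__ == '__main__':
    BaseManager.register("task", callable=lambda: task)
    server = BaseManager(address=("localhost", 8080), authkey=b'abc')
    server.get_server().serve_forever()  # serve in this process (no pickling of the lambda needed)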
Example #51
class PartitionStatCollector(object):
    def __init__(self, p_to_test, address):
        self.p_to_test = p_to_test
        self.address = address
        self.prev_p = None
        self.prev_exec_time = None
        self.exec_time_list = []
        self.p_list = []
        self.start = None
        self.min_partitions = int(os.environ[PARALLAX_MIN_PARTITIONS])

    def setup_manager(self):
        if self.start is None:
            self.start = time.time()
        self.m = BaseManager(address=self.address, authkey='parallax_auth')
        q = queue.Queue()  # renamed from `queue`, which shadowed the module and broke queue.Queue()
        BaseManager.register('queue', callable=lambda: q)
        self.m.start()
        return self.m

    def recv_exec_time(self, processes, cleanup, num_required):
        stop = False
        worker_exec_times = []
        all_alive = True
        while len(worker_exec_times) != num_required and all_alive:
            time.sleep(10)
            q = self.m.queue()
            while q.qsize() > 0:
                worker_exec_times.append(q.get())

            for p in processes:
                if p.poll() is not None:
                    all_alive = False
                    break

        cleanup(None, None)
        time.sleep(10)

        if all_alive:
            curr_p = self.p_to_test
            curr_exec_time = np.mean(worker_exec_times)
            self.p_list.append(curr_p)
            self.exec_time_list.append(curr_exec_time)

            if self.prev_p:
                if self.prev_exec_time < curr_exec_time:
                    # decrease or stop
                    if self.prev_p > curr_p:
                        stop = True
                    else:
                        # search the opposite partitions
                        self.p_to_test = min(self.p_list) / 2
                else:
                    assert (self.prev_exec_time / curr_exec_time) > 1
                    # keep increase or keep decrease
                    if self.prev_p < curr_p:
                        self.p_to_test *= 2
                    else:
                        self.p_to_test /= 2

                if self.p_to_test < self.min_partitions:
                    stop = True
            else:
                # increase first
                self.p_to_test *= 2

            self.prev_p = curr_p
            self.prev_exec_time = curr_exec_time
        else:
            # communication error when num partitions is small
            if self.prev_p:
                stop = True
            else:
                self.p_to_test *= 2
                self.min_partitions = self.p_to_test

        if stop:
            end = time.time()
            self.p_to_test = self._find_optimal_p()
            parallax_log.info('optimal partitions: %d, search time: %d secs' % \
                (self.p_to_test, end - self.start))
            print('optimal partitions: %d, search time: %d secs' % \
                (self.p_to_test, end - self.start))

        return not stop, self.p_to_test

    def _find_optimal_p(self):
        parallax_log.info('start finding optimal p')
        print('start finding optimal p')
        parallax_log.info(self.p_list)
        print(self.p_list)
        parallax_log.info(self.exec_time_list)
        print(self.exec_time_list)

        if len(self.p_list) < 3:
            min_exec_time = min(self.exec_time_list)
            return self.p_list[self.exec_time_list.index(min_exec_time)]

        max_time = float(max(self.exec_time_list))
        exec_times = [t / max_time for t in self.exec_time_list]

        fitfunc = lambda n, a, b, c: b / n + a * (n - 1) + c
        p, pcov = optimize.curve_fit(fitfunc, np.array(self.p_list),
                                     np.array(exec_times))

        min_p = min(self.p_list)
        max_p = max(self.p_list)

        min_exec_time = None
        optimal_p = None
        for i in range(min_p, max_p + 1):
            prediction = fitfunc(i, p[0], p[1], p[2])

            if min_exec_time is None or min_exec_time > prediction:
                min_exec_time = prediction
                optimal_p = i

        return optimal_p
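
The fit above models execution time as b/n (work that shrinks as partitions grow) plus a*(n-1) (per-partition overhead) plus a constant c. A standalone sketch with made-up measurements shows the same fit-then-scan procedure; the numbers are illustrative only:

# Standalone sketch of the cost model used in _find_optimal_p; the
# measurements below are fabricated for illustration.
import numpy as np
from scipy import optimize

fitfunc = lambda n, a, b, c: b / n + a * (n - 1) + c
p_list = np.array([4, 8, 16, 32])               # partition counts tried
exec_times = np.array([1.0, 0.62, 0.48, 0.55])  # normalized execution times

params, _ = optimize.curve_fit(fitfunc, p_list, exec_times)
# scan every candidate partition count and keep the predicted minimum
optimal_p = min(range(4, 33), key=lambda n: fitfunc(n, *params))
print('predicted optimal partitions:', optimal_p)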
Example #52
def plot(rewards):
    clear_output(True)
    plt.figure(figsize=(20, 5))
    plt.plot(rewards)
    plt.savefig('sac_v2_multi.png')
    # plt.show()
    plt.clf()


if __name__ == '__main__':

    replay_buffer_size = 1e6

    # the replay buffer is a class; register it with a multiprocessing BaseManager so a proxy can be shared across processes
    BaseManager.register('ReplayBuffer', ReplayBuffer)
    manager = BaseManager()
    manager.start()
    replay_buffer = manager.ReplayBuffer(
        replay_buffer_size)  # share the replay buffer through manager

    # choose env
    ENV = ['Pendulum', 'Reacher'][0]
    if ENV == 'Reacher':
        NUM_JOINTS = 2
        LINK_LENGTH = [200, 140]
        SCREEN_SIZE = 1000
        SPARSE_REWARD = False
        SCREEN_SHOT = False
        action_range = 10.0
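
The snippet cuts off here; the usual continuation of this pattern is to hand the proxied buffer to worker processes. A hedged sketch of that step, assuming ReplayBuffer exposes a push(state, action, reward, next_state, done) method (an assumption, since the class is not shown here):

# Hedged continuation sketch (not from the original): workers receive the
# manager proxy and push transitions into the shared buffer. Works with the
# fork start method; under spawn, define explore() at module level.
import multiprocessing as mp

def explore(buffer_proxy, n_steps):
    for _ in range(n_steps):
        # dummy (s, a, r, s', done) transition; push() signature is assumed
        buffer_proxy.push(None, None, 0.0, None, False)

workers = [mp.Process(target=explore, args=(replay_buffer, 100)) for _ in range(2)]
for w in workers:
    w.start()
for w in workers:
    w.join()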
Example #53

def remove_local_folder(compute_plan_id):
    client = docker.from_env()
    volume_id = get_volume_id(compute_plan_id)
    try:
        local_volume = client.volumes.get(volume_id=volume_id)
        local_volume.remove(force=True)
    except docker.errors.NotFound:
        pass
    except Exception:
        logging.error(f'Cannot remove local volume {volume_id}', exc_info=True)


# Instantiate ResourcesManager through BaseManager to share it between concurrent celery tasks
BaseManager.register('ResourcesManager', ResourcesManager)
manager = BaseManager()
manager.start()
resources_manager = manager.ResourcesManager()


@app.task(ignore_result=True)
def prepare_training_task():
    prepare_task(TRAINTUPLE_TYPE)


@app.task(ignore_result=True)
def prepare_testing_task():
    prepare_task(TESTTUPLE_TYPE)

Example #54
class Master():
    def __init__(self,
                 fromDBSetting,
                 toDBSetting,
                 toDBTableName,
                 line,
                 port,
                 bigFields=None,
                 disPatchedJobListLegth=50,
                 getDataLength=10000):
        # queue of jobs dispatched to worker processes
        self.dispatchedJobQueue = Queue(disPatchedJobListLegth)
        # single-slot configuration queue shared with workers
        self.config = Queue(1)
        self.FROM_DB_CON = None
        self.fromDBSetting = fromDBSetting
        self.getDataLength = getDataLength
        self.port = port
        self.line = line
        self.bigFields = bigFields
        self.config.put({
            'endFlag': False,
            'line': line,
            'from_db_setting': fromDBSetting,
            'to_db_setting': toDBSetting,
            'to_db_table': toDBTableName,
        })
        self.manager = None

    def getDispatchedJobQueue(self):
        return self.dispatchedJobQueue

    def getConfigQueue(self):
        return self.config

    def start(self):
        BaseManager.register('getDispatchedJobQueue',
                             callable=self.getDispatchedJobQueue)
        BaseManager.register('getConfigQueue', callable=self.getConfigQueue)
        self.manager = BaseManager(address=('0.0.0.0', self.port),
                                   authkey=b'huafeng@123+1s')

        self.manager.start()
        self.FROM_DB_CON = self.getDBConnection(self.fromDBSetting)

        self.selectRows(self.line)

        while self.manager.getDispatchedJobQueue().qsize():
            time.sleep(5)
        self.manager.shutdown()

    @staticmethod
    def getDBConnection(DB_SETTINGS):
        if DB_SETTINGS['ENGINE'].endswith('mysql'):
            connection = pymysql.connect(
                host=DB_SETTINGS['HOST'],
                user=DB_SETTINGS['USER'],
                password=DB_SETTINGS['PASSWORD'],
                database=DB_SETTINGS['NAME'],
                port=int(DB_SETTINGS['PORT']),
            )
        elif DB_SETTINGS['ENGINE'].endswith('oracle'):
            dsn = '%s:%s/%s' % (DB_SETTINGS['HOST'], DB_SETTINGS['PORT'],
                                DB_SETTINGS['NAME'])
            connection = cx_Oracle.connect(user=DB_SETTINGS['USER'],
                                           password=DB_SETTINGS['PASSWORD'],
                                           dsn=dsn)
        else:
            connection = None
        return connection

    def selectRows(self, line):
        currentCount = 0
        listLength = 0
        list1 = []
        fromTable = petl.fromdb(self.FROM_DB_CON, line)
        it = iter(fromTable)
        hdr = next(it)
        for one in it:
            currentCount += 1
            listLength += 1
            if self.bigFields is not None:
                bigFields = self.bigFields
                one = list(one)
                if 'BLOB' in bigFields:
                    for n in bigFields['BLOB']:
                        try:
                            one[n] = one[n].read()
                        except Exception as e:
                            print(e)
            list1.append(one)
            if listLength == self.getDataLength:
                #print(self.manager.getDispatchedJobQueue().qsize())
                qList = list1
                self.manager.getDispatchedJobQueue().put(qList)
                list1 = []
                listLength = 0

        if len(list1):
            self.manager.getDispatchedJobQueue().put(list1)

        data = self.manager.getConfigQueue().get(1)
        data['endFlag'] = True
        self.manager.getConfigQueue().put(data)
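
A hypothetical worker-side counterpart for this Master (not part of the original snippet): it connects with the same port and authkey, drains row batches from the dispatched-job queue, and stops once the config queue signals endFlag:

# Hypothetical worker for the Master above; the insert-into-target-DB step
# is elided because the real worker code is not shown here.
import time
from multiprocessing.managers import BaseManager

BaseManager.register('getDispatchedJobQueue')
BaseManager.register('getConfigQueue')

def work(host, port):
    m = BaseManager(address=(host, port), authkey=b'huafeng@123+1s')
    m.connect()
    jobs = m.getDispatchedJobQueue()
    config_queue = m.getConfigQueue()
    while True:
        if jobs.qsize():
            rows = jobs.get()
            print('received batch of %d rows' % len(rows))  # insert into target DB here
        else:
            config = config_queue.get()
            config_queue.put(config)  # put it back for other workers
            if config['endFlag']:
                break
            time.sleep(1)

# usage: work('127.0.0.1', <port used by the Master>)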
Example #55
def startfunc():
    #rpc register
    BaseManager.register('get_taskq')
    BaseManager.register('get_ctlq')
    BaseManager.register('push_task')
    BaseManager.register('push_ctl')
    BaseManager.register('get_resq')
    BaseManager.register('get_logq')
    #connect
    global qmgr
    qmgr = BaseManager(address=(config.HOST, config.QPORT),
                       authkey=config.AUTH_KEY)
    logging.info('connecting to queue ..')
    qmgr.connect()
    logging.info('connected to queue success')
    #threads
    ths = {th_monitorres: None, th_console: None, th_monitorlog: None}
    for f in list(ths):
        th = threading.Thread(target=f)
        th.daemon = True
        th.start()
        ths[f] = th
    # the main thread idles here; since the loop never breaks, the joins
    # below are unreachable as written
    while True:
        #push_task({'id':uuid.uuid1()})
        time.sleep(1)
    for th in ths.values():
        th.join()
Example #56
        processes_learn = []
        # gpu 0
        process = multiprocessing.Process(target=game.agent_learn, args=(0, start_event_l0, end_event_l0))
        process.start()
        processes_learn.append(process)
        # gpu 1
        process = multiprocessing.Process(target=game.agent_learn, args=(1, start_event_l1, end_event_l1))
        process.start()
        processes_learn.append(process)

        """
            Define Manager
        """
        # TODO, modify device label

        BaseManager.register('get_actions_g7_queue')
        BaseManager.register('get_rewards_g7_queue')
        BaseManager.register('get_states_g7_queue')
        BaseManager.register('get_fap_g7_queue')
        BaseManager.register('get_finish_g7_queue')
        BaseManager.register('get_actions_glb_g7_queue')
        mgr = BaseManager(address=("172.16.68.1", 4444), authkey=b"game")

        mgr.connect()
        actions_g7 = mgr.get_actions_g7_queue()
        rewards_g7 = mgr.get_rewards_g7_queue()
        states_g7 = mgr.get_states_g7_queue()
        fap_g7 = mgr.get_fap_g7_queue()
        finish_g7 = mgr.get_finish_g7_queue()
        actions_glb_g7 = mgr.get_actions_glb_g7_queue()
Example #57
import time, sys, queue, random
from multiprocessing.managers import BaseManager

BaseManager.register('get_task')
BaseManager.register('get_result')

conn = BaseManager(address=('127.0.0.1', 5002), authkey=b'123')

try:
    conn.connect()
except Exception:
    print('failed to connect')
    sys.exit()

task = conn.get_task()
result = conn.get_result()

while not task.empty():
    n = task.get(timeout=1)
    print('run task %d' % n)
    sleeptime = random.randint(0, 3)
    time.sleep(sleeptime)
    rt = (n, sleeptime)
    result.put(rt)

if __name__ == '__main__':
    pass
# run task 0
# run task 1
# run task 2
# run task 3
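
The worker above expects a master process serving the two queues. A minimal sketch of that side, assuming the same address and authkey (note that lambdas in the registry break under the spawn start method; use module-level functions there):

# Hypothetical master for the worker above: serves get_task/get_result on
# 127.0.0.1:5002 with authkey b'123', then collects the results.
import queue
from multiprocessing.managers import BaseManager

task_queue = queue.Queue()
result_queue = queue.Queue()

BaseManager.register('get_task', callable=lambda: task_queue)
BaseManager.register('get_result', callable=lambda: result_queue)

if __name__ == '__main__':
    manager = BaseManager(address=('127.0.0.1', 5002), authkey=b'123')
    manager.start()
    task, result = manager.get_task(), manager.get_result()
    for n in range(4):
        task.put(n)
    for _ in range(4):
        n, sleeptime = result.get(timeout=60)
        print('task %d finished after sleeping %d s' % (n, sleeptime))
    manager.shutdown()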
Example #58
    lock.acquire()
    bufferHead.printHashQ()
    bufferHead.printFreeList()
    sleepQueue.printSQ()
    lock.release()

    time.sleep(5)

    BufferRelease.brelse(lockedBuffer, bufferHead, lock, sleepQueue)

    print(startStr, 'Finished execution...')


if __name__ == '__main__':
    BaseManager.register('Buffer', Buffer)
    BaseManager.register('SleepQueue', SleepQueue)
    BaseManager.register('BufferHeader')
    lock = multiprocessing.Lock()

    manager = BaseManager()
    manager.start()

    buffer = manager.Buffer()
    sleepQueue = manager.SleepQueue()

    buffer.printFreeList()

    NO_OF_PROCESSES = 3

    processes = []
Example #59
    def run(self, tasks_list, execute_fn, progress_fn=None):
        '''
        :param tasks_list: should be a list of dictionaries containing the input parameters for each individual function call.
             E.g. [{id:1, path:'filepath1', user:'******'},
                  {id:2, path:'filepath2', user:'******'},
                  {id:3, path:'filepath3', user:'******'}
                ]
        :param execute_fn: This should be a function that runs the desired execution for a single task in the 'tasks_list'
        :param progress_fn: An optional function that can be used to get feedback on the progress of execution
        '''

        # make sure the tasks are in the correct format
        self.validate_tasks(tasks_list)
        n_tasks = len(tasks_list)

        def submit_progress():
            if progress_fn is not None:
                success = tracker.get_success()
                errors, _ = tracker.get_errors()
                progress = 0 if n_tasks == 0 else 100.0 * (success +
                                                           errors) / n_tasks
                progress_fn(progress, success, errors)

        # only run multithreaded if we can
        start_time = time.time()
        if self.run_multithreaded:
            # create input and output queues
            try:
                BaseManager.register('FiFoQueue', FiFoQueue)
                BaseManager.register('ExcecutionTracker', ExcecutionTracker)
                manager = BaseManager()
                manager.start()
                request_queue = manager.FiFoQueue()
                result_queue = manager.FiFoQueue()
                tracker = manager.ExcecutionTracker()
            except Exception:
                logging.error(
                    'Unable to start BaseManager. \n'
                    'This is most likely due to an \'if __name__ == \'__main__\':\''
                    ' statement not being present in your main executing script.\n'
                    'See https://healthq.atlassian.net/browse/DMH-212?focusedCommentId=25606&page=com.atlassian.jira.plugin.system.issuetabpanels%3Acomment-tabpanel#comment-25606'
                    ' http://stackoverflow.com/a/18205006 for more details')
                raise

            # fill input queue with tasks
            request_queue.push_multiple(tasks_list)

            if self.n_threads is None:
                self.n_threads = mp.cpu_count()
            nodes = list()

            # wait for all execution nodes to finish
            while not request_queue.is_empty() or any(
                [node.is_alive() for node in nodes]):
                exception_raised = len([
                    node for node in nodes
                    if not node.is_alive() and node.exitcode != 0
                ]) > 0
                if exception_raised:
                    raise Exception(
                        'Exception while executing. Please see logs for more details.'
                    )

                nodes = [node for node in nodes if node.is_alive()]
                while len(nodes) < self.n_threads:
                    node = WorkerNode(request_queue, result_queue, tracker,
                                      execute_fn, self.recycle_proc_after,
                                      self.raise_on_error_types)
                    nodes.append(node)
                    node.start()

                submit_progress()
                time.sleep(0.5)

            if tracker.should_terminate():
                exit(1)
        else:
            request_queue = FiFoQueue()
            result_queue = FiFoQueue()
            tracker = ExcecutionTracker()
            node = WorkerNode(request_queue,
                              result_queue,
                              tracker,
                              execute_fn,
                              recycle_proc_after=0,
                              raise_on_error_types=self.raise_on_error_types)
            for task in tasks_list:
                request_queue.push(task)
                node.run(
                )  # execution will be done on the same thread (note we call run() here and not start())
                submit_progress()

        submit_progress()
        self.elapsed_time = time.time() - start_time

        # read all outputs into a list
        lst_outputs = result_queue.pop_all()

        # log execution results and return output
        return lst_outputs
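
A hypothetical driver for run(); the enclosing class is not shown in this snippet, so its instantiation is left as a comment:

# Hypothetical usage of run(): execute_fn handles one task dict,
# progress_fn receives (progress_percent, successes, errors).
def execute_fn(task):
    print('processing task %s at %s' % (task['id'], task['path']))

def progress_fn(progress, success, errors):
    print('%5.1f%% done (%d ok, %d failed)' % (progress, success, errors))

if __name__ == '__main__':
    tasks = [{'id': i, 'path': 'file%d' % i, 'user': 'u%d' % i} for i in range(10)]
    # runner = <instance of the enclosing runner class, not shown above>
    # outputs = runner.run(tasks, execute_fn, progress_fn)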
Example #60
from multiprocessing import Lock
from multiprocessing.managers import BaseManager
from shared import LOCK_MGR_PORT, LOCK_MGR_PWD
import time

lock = Lock()
# dict tracking which keys are currently in use
keymap = dict()

def get_key(key):
    with lock:
        if key not in keymap:
            keymap[key] = True
            return True
        return False

def release_key(key):
    with lock:
        if key in keymap:
            del keymap[key]
            return True
        return False
    
if __name__ == '__main__':
    manager = BaseManager(('', LOCK_MGR_PORT), LOCK_MGR_PWD)
    # registering plain callables means each call hands the client a proxy
    # to the returned bool, not the bool itself (see the client sketch below)
    manager.register('get_key', get_key)
    manager.register('release_key', release_key)
    server = manager.get_server()
    print('lock service running on port %d' % LOCK_MGR_PORT)
    server.serve_forever()
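
A hypothetical client for this lock service. Because the server registers plain callables, each call returns a proxy to the resulting bool rather than the bool itself (and a bare proxy is always truthy), so the sketch unwraps it with the proxy's _getvalue() method:

# Hypothetical client; LOCK_MGR_PORT / LOCK_MGR_PWD come from the same
# shared module as the server.
from multiprocessing.managers import BaseManager
from shared import LOCK_MGR_PORT, LOCK_MGR_PWD

BaseManager.register('get_key')
BaseManager.register('release_key')

if __name__ == '__main__':
    m = BaseManager(('127.0.0.1', LOCK_MGR_PORT), LOCK_MGR_PWD)
    m.connect()
    granted = m.get_key('resource-1')._getvalue()  # unwrap the proxied bool
    if granted:
        try:
            print('lock on resource-1 acquired')  # critical section goes here
        finally:
            m.release_key('resource-1')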