Example #1
    def execute(self, item_generator, item_processor):
        # Create a queue to communicate with the worker threads
        try:
            queue = Queue()
            threads = []
            # Create the worker threads (self.no_of_threads of them)
            for x in range(self.no_of_threads):
                worker = Neo4jUploadWorker(queue, item_processor)
                # Setting daemon to True will let the main thread exit even though the workers are blocking
                worker.daemon = True
                worker.start()
                threads.append(worker)
            # Put the tasks into the queue as a tuple
            for item in item_generator.generate():
                queue.put(item)
            # Causes the main thread to wait for the queue to finish processing all the tasks
            queue.join()

            # stop workers
            for i in range(self.no_of_threads):
                queue.put(None)
            for t in threads:
                t.join()
        finally:
            print('In the finally')
            item_generator.close()
            for i in range(self.no_of_threads):
                queue.put(None)
            for t in threads:
                t.join()
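Neo4jUploadWorker itself is not shown in this example; a minimal sketch of what such a worker presumably looks like (pull an item, hand it to item_processor, mark it done, exit on the None sentinel) might be:

from threading import Thread


class Neo4jUploadWorker(Thread):
    """Hypothetical worker: drains the queue until it sees the None sentinel."""

    def __init__(self, queue, item_processor):
        super().__init__()
        self.queue = queue
        self.item_processor = item_processor

    def run(self):
        while True:
            item = self.queue.get()
            try:
                if item is None:
                    return  # sentinel from execute(): stop this worker
                self.item_processor(item)
            finally:
                # Always mark the item done so queue.join() can return
                self.queue.task_done()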
Example #2
def main_shutdown_callback(q: mp.Queue, *, cbtype: "CallbackType"):
    async def main():
        # Inform the test caller that the main coro is ready
        q.put_nowait("ok")
        await asyncio.sleep(10.0)
        # Only reached if the sleep completes without being cancelled.
        q.put_nowait(True)

    if cbtype is CallbackType.FUNCTION:
        def shutdown_callback(loop):
            q.put_nowait(True)
    elif cbtype is CallbackType.ASYNC_DEF_FUNCTION:
        async def shutdown_callback(loop):
            q.put_nowait(True)
    elif cbtype is CallbackType.COROUTINE_OBJECT:
        async def shutdown_callback_fn(loop):
            q.put_nowait(True)

        shutdown_callback = shutdown_callback_fn(None)
    else:
        raise TypeError('Unexpected cbtype')

    run(main(), shutdown_callback=shutdown_callback)
    q.put(None)
    q.join()
Example #3
def main_sigterm_enduring_indirect_cancel(q: mp.Queue):
    async def corofn():
        q.put_nowait("ok")
        await asyncio.sleep(0.2)
        q.put_nowait(True)

    def direct_cancel():
        """Reach inside, find the one task that is marked "do not cancel"
        for shutdown, and then cancel it directly. This should raise
        CancelledError at the location of the caller for
        `shutdown_waits_for()`."""
        tasks = all_tasks()
        for t in tasks:  # pragma: no cover
            if t._coro in _DO_NOT_CANCEL_COROS:
                t.cancel()
                return

    async def main():
        loop = asyncio.get_event_loop()
        coro = corofn()
        loop.call_later(0.1, direct_cancel)
        try:
            await shutdown_waits_for(coro)
        except asyncio.CancelledError:
            q.put_nowait("got cancellation as expected")
        else:
            q.put_nowait("no cancellation raised")

    run(main())
    q.put(None)
    q.join()
Example #4
def ip():
    num_threads = 10
    q = Queue()

    def pingme(i, queue):
        while True:
            ip = queue.get()
            ret = subprocess.call('ping -c 1 %s' % ip, shell=True, stdout=open('e:\\git\\ip.txt', 'w'),
                                  stderr=subprocess.STDOUT)
            if ret == 0:
                print('%s is up!' % ip)
            elif ret == 1:
                print('%s is down...' % ip)
            queue.task_done()

    for i in range(num_threads):
        t = Thread(target=pingme, args=(i, q))  # spawn the worker threads
        t.daemon = True  # daemon threads, so the main thread can exit without them
        t.start()
    info = Monitor.query.all()
    for i in info:
        q.put(i.IpAddr)  # enqueue each monitored IP address
    q.join()
    print('Done')
    return 'ok'
Example #5
def main_shutdown_callback_error_and_main_error(q: mp.Queue):

    import logging

    log_messages = []

    def filt(record):
        log_messages.append(record.getMessage())
        return record

    logging.getLogger("aiorun").addFilter(filt)

    async def main():
        await asyncio.sleep(1e-3)
        raise Exception("main")

    def shutdown_callback(loop):
        raise Exception("blah")

    try:
        run(main(), stop_on_unhandled_errors=True, shutdown_callback=shutdown_callback)
    except Exception as e:
        q.put_nowait(e)
    else:
        q.put_nowait("exception was not raised")
    finally:
        q.put_nowait(log_messages)
        q.put_nowait(None)
        q.join()
Example #6
def test_looker():
    print('Listening to the mailbox...')
    params = config()
    spreadsheetId = params.get('spreadsheetid', None)
    GOOGLE_CREDENTIALS_FILE = params.get('credential_file', None)

    GOOGLE_CREDENTIALS_DATA = get_obj_json_from_file(GOOGLE_CREDENTIALS_FILE)
    print("type=", type(GOOGLE_CREDENTIALS_DATA))
    google_sheets_creadential_json = GOOGLE_CREDENTIALS_DATA
    # while True:
    #     init_looker_multythread(spreadsheetId=spreadsheetId,
    #                        google_sheets_creadential_json=GOOGLE_CREDENTIALS_DATA,
    #                        imap_server='imap.yandex.ru',
    #                        email='*****@*****.**',
    #                        passwd='kpwoltwjqpsboxde',
    #                        folder='test2')
    imap_server = 'imap.yandex.ru'
    email = '*****@*****.**'
    passwd = 'alpglyneshancphh'
    folder = 'test2'
    queue = Queue()
    IMAP4_server = imaplib.IMAP4_SSL(imap_server)
    IMAP4_server.login(email, passwd)
    look = SilentLooker(queue, folder, IMAP4_server)

    # Start the worker threads that service the queue
    for i in range(5):
        t = SSWriter(queue, spreadsheetId, google_sheets_creadential_json)
        t.daemon = True
        t.start()

    look.run()
    # Wait for the queue to finish processing
    queue.join()
Example #8
    def vulnerabilities_by_date(self, year):
        url = F"https://www.cvedetails.com/vulnerability-list/year-{year}/vulnerabilities.html"
        res = requests.get(url, headers=self.headers)
        html = etree.HTML(res.content)

        # Total vulnerability count and links to the per-page vulnerability lists:
        total_vuln = html.xpath('//*[@id="pagingb"]/b/text()')
        link = html.xpath('//*[@id="pagingb"]/a/@href')
        page_link = ["https://www.cvedetails.com" + i for i in link]

        # Create the table
        cur = self.conn.cursor()
        #cve_id, cve_type, cve_score, cve_authority, cve_vendor, cve_produce, cve_produce_version
        sql = F"CREATE TABLE IF NOT EXISTS cve{year}(cve_id TEXT PRIMARY KEY, type TEXT,score TEXT, authority TEXT, vendor TEXT, produce TEXT, produce_version TEXT)"
        cur.execute(sql)

        # Set up the two queues
        url_queue = Queue(maxsize=self.thread_num * 3)
        cve_info_queue = Queue(maxsize=self.thread_num * 3)

        # Produce the CVE detail URLs
        producer_thread = Process(target=self.producer,
                                  args=(url_queue, page_link))
        producer_thread.daemon = True
        producer_thread.start()

        # Consume and parse the CVE detail pages
        for index in range(self.thread_num):
            consumer_thread = Process(target=self.cve_data,
                                      args=(
                                          url_queue,
                                          cve_info_queue,
                                      ))
            consumer_thread.daemon = True
            consumer_thread.start()

        # Write the CVE info into the table
        excel_thread = Process(target=self.write_sql,
                               args=(
                                   cve_info_queue,
                                   cur,
                                   year,
                               ))
        excel_thread.daemon = True
        excel_thread.start()

        # Synchronize: make sure the producer and both queues finish
        producer_thread.join()
        url_queue.join()
        # print(url_queue.qsize())
        cve_info_queue.join()
        # print(cve_info_queue.qsize())

        self.conn.commit()
        # self.conn.close()
        print(F"{year}年cve信息全部写入成功")
Example #9
def main_sig_pause(q: mp.Queue):
    async def main():
        try:
            q.put_nowait("ok")
            await asyncio.sleep(5.0)
        except asyncio.CancelledError:
            print("in cancellation handler")
            await asyncio.sleep(0.1)

    run(main())
    q.put("done")
    q.put(None)
    q.join()
Example #10
class ThreadPool:
    """Pool of threads consuming tasks from a queue"""
    def __init__(self, num_threads):
        self.tasks = Queue(num_threads)
        for _ in range(num_threads):
            Worker(self.tasks)

    def add_task(self, func, *args, **kargs):
        """Add a task to the queue"""
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue"""
        self.tasks.join()
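The Worker class referenced by ThreadPool is not included in this snippet; a minimal sketch of the conventional implementation (a daemon thread that pulls (func, args, kargs) tuples off the queue and calls task_done) might look like this:

from threading import Thread


class Worker(Thread):
    """Sketch of a thread that executes tasks from the pool's queue."""

    def __init__(self, tasks):
        super().__init__(daemon=True)
        self.tasks = tasks
        self.start()

    def run(self):
        while True:
            func, args, kargs = self.tasks.get()
            try:
                func(*args, **kargs)
            except Exception as e:
                # A failing task should not kill the worker
                print(e)
            finally:
                # Mark the task done so ThreadPool.wait_completion() can return
                self.tasks.task_done()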
Example #11
def main(links, output_dir):
    # Create a queue to communicate with the worker threads
    queue = Queue()
    # Create 5 worker threads
    for x in range(5):
        worker = Worker(queue, output_dir)
        # Setting daemon to True will let the main thread exit even though the workers are blocking
        worker.daemon = True
        worker.start()
    # Put the tasks into the queue as a tuple
    for link in links:
        queue.put(link)

    # Causes the main thread to wait for the queue to finish processing all the tasks
    queue.join()
Example #12
def main_shutdown_callback_error(q: mp.Queue):
    async def main():
        await asyncio.sleep(1e-3)

    def shutdown_callback(loop):
        raise Exception("blah")

    try:
        run(main(), shutdown_callback=shutdown_callback)
    except Exception as e:
        q.put_nowait(e)
    else:
        q.put_nowait("exception was not raised")
    finally:
        q.put_nowait(None)
        q.join()
Example #13
def main():
    manager = Manager()
    d = manager.dict()
    que = Queue(10000)

    # Pass the target and its arguments separately; calling recv_items(que) here would run it inline
    p = Process(target=recv_items, args=(que,))
    p.daemon = True
    p.start()

    pool = Pool(100)
    for i in range(1, 100):
        pool.apply_async(handle, args=(que, i, d))

    p.join()

    # que must be a multiprocessing.JoinableQueue for join() to be available
    que.join()
Example #14
def main(q: mp.Queue, **kwargs):
    async def main():
        q.put("ok")
        await asyncio.sleep(5.0)

    if kwargs.pop("use_exe", None):
        exe = ThreadPoolExecutor()
    else:
        exe = None

    loop = None
    if kwargs.pop("user_supplied_loop", None) and not kwargs.get("use_uvloop"):
        loop = newloop()

    try:
        run(main(), executor=exe, loop=loop, **kwargs)
    finally:
        q.put(None)
        q.join()
Example #15
def main_shutdown_callback(q: mp.Queue):
    fut = None

    async def _main():
        nonlocal fut
        q.put_nowait("ok")
        fut = asyncio.Future()
        await fut
        q.put_nowait(True)

    async def main():
        await shutdown_waits_for(_main())

    def shutdown_callback(loop):
        fut.set_result(None)

    run(main(), shutdown_callback=shutdown_callback)
    q.put(None)
    q.join()
Example #16
def main_sigterm_enduring_create_task(q: mp.Queue, spawn_method):
    async def corofn():
        q.put_nowait("ok")
        await asyncio.sleep(0.05)
        q.put_nowait(True)

    async def main():
        loop = asyncio.get_event_loop()
        if spawn_method == "create_task":
            loop.create_task(shutdown_waits_for(corofn()))
        elif spawn_method == "ensure_future":
            asyncio.ensure_future(shutdown_waits_for(corofn()))
        elif spawn_method == "await":
            await shutdown_waits_for(corofn())
        elif spawn_method == "bare":
            shutdown_waits_for(corofn())

    run(main())
    q.put(None)
    q.join()
Example #17
def main(sysargv=[]):
    TLIM = 50  #the maximum number of concurrent threads
    MLIM = 1000.0  #the memory limit (in GB)
    if len(sysargv) > 0:
        TLIM = int(sysargv[0])
    if len(sysargv) > 1:
        MLIM = float(sysargv[1])

    #imprecise safety precautions
    TLIM = TLIM - 1
    MLIM = int(MLIM * (2**20) * float(TLIM) / (TLIM + 1))  #in kB

    t_arr = []  #array of threads
    q = Queue()  #queue of commands
    lock = Lock()

    #################################################
    ##GENERATE FILELIST HERE
    filelist = []
    #################################################

    #put each command on the queue
    for i, f in enumerate(filelist):  #i is the counter, f is the filename

        #################################################
        ##GENERATE CMD HERE
        cmd = "echo \"stuff\""
        #################################################

        q.put(cmd)

    #create TLIM workers to process the queue
    for i in range(TLIM):
        t_arr.append(threading.Thread(target=worker, args=(
            q,
            lock,
        )))
        t_arr[i].daemon = True
        t_arr[i].start()

    q.join()
Example #18
def main():
    # Note: for the Process consumers below and the final q.join() to work, q should be
    # a multiprocessing.JoinableQueue rather than a thread-only queue.Queue.
    q = Queue(120000)

    database = load_database(DATABASE)

    producer = Thread(target=generate, args=(q, "weakpass_2a"))
    producer.start()

    consumers = []
    for i in range(4):
        name = 'Consumer-{}'.format(i)
        consumer = Process(target=check, args=(name, q, database))
        consumer.start()
        consumers.append(consumer)

    producer.join()

    for consumer in consumers:
        consumer.join()

    q.join()
Example #19
def init_looker_multythread(spreadsheetId, google_sheets_creadential_json,
                            imap_server, email, passwd, folder):
    IMAP4_server = imaplib.IMAP4_SSL(imap_server)
    IMAP4_server.login(email, passwd)
    emails_info = get_data_email_message(folder, IMAP4_server)
    """
    Запускаем программу
    """
    queue = Queue()

    # Запускаем очередь
    for i in range(5):
        t = SSWriter(queue, spreadsheetId, google_sheets_creadential_json)
        t.setDaemon(True)
        t.start()

    # Добавляем в очередь письмо
    for email in emails_info:
        queue.put(email)

    # Ждем завершения работы очереди
    queue.join()
Example #20
    def runClient(self):
        try:
            send_msg = Queue()
            recive_msg = Queue()
            sending = Process(target=self.send, args=(send_msg,))
            reciveing = Process(target=self.receive, args=(recive_msg,))
            sending.start()
            reciveing.start()
            while True:
                reciveing.join()
                command = recive_msg.get()

                # Arduino Parts
                Start_point = time.time()
                self.arduino.Value_to_T_data(self.position.BF_desire,
                                             self.position.LR_desire)
                self.arduinoSerial_write()
                self.arduinoSerial_read()
                if self.arduino.R_data != "":
                    print(self.arduino.R_data)
                    send_msg.put(self.arduino.R_data)
                    send_msg.join()
                End_point = time.time()
                time.sleep(self.arduino.Loop_time - (End_point - Start_point))
                reciveing.run()
                send_msg.run()

        except KeyboardInterrupt:
            print("Emergency stop!!!!")
            self.sock.close()
            reciveing.close()
            send_msg.close()
        finally:
            print("OFF")
            self.sock.close()
            reciveing.close()
            send_msg.close()
Example #21
class PlasmaShmQueue:
    def __init__(self, maxsize: int = 0):
        r"""Use pyarrow in-memory plasma store to implement shared memory queue.

        Compared to native `multiprocess.Queue`, `PlasmaShmQueue` avoid pickle/unpickle
        and communication overhead, leading to better performance in multi-process
        application.

        :type maxsize: int
        :param maxsize: maximum size of the queue, `None` means no limit. (default: ``None``)
        """

        self.socket_name = MGE_PLASMA_STORE_MANAGER.socket_name

        # TODO: how to catch the exception happened in `plasma.connect`?
        self.client = None

        # Used to store the headers (ObjectIDs) for the data.
        self.queue = Queue(maxsize)  # type: Queue

    def put(self, data, block=True, timeout=None):
        if self.client is None:
            self.client = plasma.connect(self.socket_name)
        try:
            object_id = self.client.put(data)
        except plasma.PlasmaStoreFull:
            raise RuntimeError("plasma store out of memory!")
        try:
            self.queue.put(object_id, block, timeout)
        except queue.Full:
            self.client.delete([object_id])
            raise queue.Full

    def get(self, block=True, timeout=None):
        if self.client is None:
            self.client = plasma.connect(self.socket_name)
        object_id = self.queue.get(block, timeout)
        if not self.client.contains(object_id):
            raise RuntimeError(
                "ObjectID: {} not found in plasma store".format(object_id)
            )
        data = self.client.get(object_id)
        self.client.delete([object_id])
        return data

    def qsize(self):
        return self.queue.qsize()

    def empty(self):
        return self.queue.empty()

    def join(self):
        self.queue.join()

    def disconnect_client(self):
        if self.client is not None:
            self.client.disconnect()

    def close(self):
        self.queue.close()
        self.disconnect_client()

    def cancel_join_thread(self):
        self.queue.cancel_join_thread()
Example #22
    c1=Process(target=consumer,args=('alex',q))


    p1.start()
    c1.start()


The consumer should send a notification back saying it really received the data; use the JoinableQueue methods for this.
#JoinableQueue([maxsize]): behaves like a Queue object, but the queue lets the consumer of an item notify the producer that the item has been processed successfully. The notification is implemented with shared signals and condition variables.
   # Parameters:
    maxsize is the maximum number of items allowed in the queue; if omitted there is no size limit.
  # Methods:
    An instance q of JoinableQueue has, in addition to the Queue methods:
    q.task_done(): the consumer calls this to signal that an item returned by q.get() has been processed. If it is called more times than there were items removed from the queue, a ValueError is raised.
    q.join(): the producer calls this to block until every item in the queue has been processed. The call blocks until q.task_done() has been called for each item that was put on the queue.

from multiprocessing import Process,Queue,JoinableQueue
import time
import random

def producer(name,food,q):
    for i in range(3):
        res='%s%s' %(food,i)
        time.sleep(random.randint(1,3))
        q.put(res)
        print('%s produced %s' %(name,res))

def consumer(name,q):
    while True:
        res=q.get()
        time.sleep(random.randint(1,3))
        print('%s consumed %s' %(name,res))
        q.task_done()  # signal q.join() that this item has been handled
Example #23
import threading
import time
from queue import Queue

print_lock = threading.Lock()


def exampleJob(worker):
    time.sleep(0.5)

    with print_lock:
        print(threading.current_thread().name, worker)


def threader():
    while True:
        worker = q.get()
        exampleJob(worker)
        q.task_done()


q = Queue()

for x in range(10):  # 10 workers
    t = threading.Thread(target=threader)
    t.daemon = True
    t.start()

start = time.time()

for worker in range(20):  # 20 jobs
    q.put(worker)

q.join()

print("Entire job took : ", time.time() - start)

Example #24
def netService(a, cmd):
    while True:
        data = listen()
        print(data[1].decode("utf-8")[0])
        if data[1].decode("utf-8")[0] == 'd':
            dry = ['Not dry', 'Dry', 'Too dry']
            send('{"Temperature":%d,"Humidity":%d,"Dry Status":"%s","Light":%d}' % (a[0], a[1], dry[a[2]], a[3]), data)
        else:
            send("a", data)


def add(l, cmd):
    for i in range(4):
        l.append(i)



if __name__ == "__main__":
    q = Queue()
    m = Manager()
    cmd='1'
    l = m.list()
    p = Process(target=netService, args=(l, cmd))
    q = Process(target=add, args=(l, cmd))
    p.start()
    q.start()
    p.join()
    q.join()

Example #25
            
            # response = client.upload(, is_dir=True)

            # send to this node 
        else:
            end_pointer = current_photo_pointer+photos_per_node
            if end_pointer > num_photos:
                 end_pointer = num_photos
            photolist = photo_list[current_photo_pointer:end_pointer]
            current_photo_pointer+=photos_per_node
            print(str(eachnode) + " : " + str(photolist))
            job_map = {'title': 'upload_image', 'client': node_client[each_node], 'list': photolist, 'filepath': images_filepath+'/images'}
            job_queue.put(job_map)
            # send regular number of photos

    job_queue.join()
    # extract 


    # client = lib.FileClient('localhost:8080')
    # # demo for file uploading
    # in_file_name = 'IMG_2331.JPG'
    # response = client.upload('dataset/images/images/**', is_dir=True)
    # print("response" + str(response))




    #execute a task on the server

Example #26

if __name__ == '__main__':
    os.makedirs("image", exist_ok=True)

    q = Queue(100)
    q_for_v = Queue(50)
    app = QtWidgets.QApplication(sys.argv)
    ui = Ui_MainWindow(q, q_for_v)
    p1 = Process(target=producer, args=(q, q_for_v))
    c = Process(target=video_load, args=(q_for_v, r'./gopro8(1)_best.mp4', 0))
    c1 = Process(target=video_load_1,
                 args=(q_for_v, r'./gopro8(2)_best.mp4', 1))

    ui.show()
    p1.start()
    c.start()
    c1.start()

    ret = app.exec_()  # run the Qt event loop; the cleanup below runs once the UI closes
    p1.join()
    c.join()
    c1.join()
    q_for_v.join()
    q.join()
    # q_for_v.close()
    # q.close()
    sys.exit(ret)
Example #27
class DeployManager(object):
    def __init__(self, args):
        self._args = args

        self.awsutils = AwsUtils(access_key=args.key, secret_key=args.secret, region=args.region)
        self.task_queue = Queue()

        self.cluster_list = self.awsutils.list_clusters()
        self.threads_count = args.threads_count
        self.service_wait_max_attempts = args.service_wait_max_attempts
        self.service_wait_delay = args.service_wait_delay

        self.key = args.key
        self.secret = args.secret
        self.region = args.region

        self.error = False

        # Services to delete
        self.delete_service_list = []
        # All services
        self.all_service_list = []
        # All services targeted for deployment
        self.all_deploy_target_service_list = []

        # stopBeforeDeploy services with primary placement
        self.primary_stop_before_deploy_service_list = []
        # stopBeforeDeploy services
        self.stop_before_deploy_service_list = []
        # Services with primary placement
        self.primary_deploy_service_list = []
        # Everything else
        self.remain_deploy_service_list = []

        self.delete_scheduled_task_list = []
        self.scheduled_task_list = []

        self.environment = None
        self.template_group = None
        self.is_service_zero_keep = True
        self.is_stop_before_deploy = True
        self.is_delete_unused_service = True
        self.force = False

    def _service_config(self):
        self.all_service_list,\
            self.all_deploy_target_service_list,\
            self.scheduled_task_list,\
            self.deploy_scheduled_task_list,\
            self.environment = get_deploy_list(
                    services_yaml=self._args.services_yaml,
                    environment_yaml=self._args.environment_yaml,
                    task_definition_template_dir=self._args.task_definition_template_dir,
                    task_definition_config_json=self._args.task_definition_config_json,
                    task_definition_config_env=self._args.task_definition_config_env,
                    deploy_service_group=self._args.deploy_service_group,
                    template_group=self._args.template_group
                )
        # Reduce the thread count if it exceeds the number of deploy targets
        deploy_size = len(self.deploy_scheduled_task_list) + len(self.all_deploy_target_service_list)
        if deploy_size < self.threads_count:
            self.threads_count = deploy_size
        self.is_service_zero_keep = self._args.service_zero_keep
        self.template_group = self._args.template_group
        self.is_delete_unused_service = self._args.delete_unused_service
        self.is_stop_before_deploy = self._args.stop_before_deploy

    def _set_deploy_list(self):
        for service in self.all_deploy_target_service_list:
            if self.is_stop_before_deploy and service.stop_before_deploy:
                if service.is_primary_placement:
                    self.primary_stop_before_deploy_service_list.append(service)
                else:
                    self.stop_before_deploy_service_list.append(service)
            else:
                if service.is_primary_placement:
                    self.primary_deploy_service_list.append(service)
                else:
                    self.remain_deploy_service_list.append(service)

    def _unstopped_primary_stop_before_deploy_service_list(self) -> list:
        return [x for x in self.primary_stop_before_deploy_service_list if x.origin_desired_count > 0]

    def _unstopped_stop_before_deploy_service_list(self) -> list:
        return [x for x in self.stop_before_deploy_service_list if x.origin_desired_count > 0]

    def _start_threads(self):
        # Start the worker threads
        for i in range(self.threads_count):
            thread = DeployProcess(
                task_queue=self.task_queue,
                key=self.key,
                secret=self.secret,
                region=self.region,
                is_service_zero_keep=self.is_service_zero_keep,
                is_stop_before_deploy=self.is_stop_before_deploy,
                service_wait_max_attempts=self.service_wait_max_attempts,
                service_wait_delay=self.service_wait_delay
            )
            thread.daemon = True
            thread.start()

    def run(self):
        self._service_config()
        self._start_threads()
        self._fetch_ecs_information()

        # Step: Delete Unused Service
        self._delete_unused()
        self._check_deploy()
        self._set_deploy_list()

        self._stop_scheduled_task()
        self._stop_before_deploy()
        self._deploy_service()
        self._start_after_deploy()
        self._deploy_scheduled_task()

        self._result_check()

    def dry_run(self):
        self._service_config()
        self._start_threads()
        self._fetch_ecs_information()

        # Step: Check Delete Service
        self._delete_unused(dry_run=True)
        # Step: Check Service
        self._check_deploy()
        self._set_deploy_list()

    def delete(self):
        self.environment = self._args.environment
        self.force = self._args.force
        self._start_threads()
        self._fetch_ecs_information(is_all=True)

        if len(self.delete_service_list) == 0 and len(self.delete_scheduled_task_list) == 0:
            info("No delete service or scheduled task")
            return
        h1("Delete Service or Scheduled Task")
        for service in self.delete_service_list:
            print("* %s" % service.service_name)
        for task in self.delete_scheduled_task_list:
            print("* %s" % task.family)

        if not self.force:
            reply = input("\nWould you like to delete all ECS services in %s? (y/n)\n" % self.environment)
            if reply != 'y':
                return
        self._delete_unused()

    def _stop_scheduled_task(self):
        if len(self.deploy_scheduled_task_list) > 0:
            h1("Step: Stop ECS Scheduled Task")
            for task in self.deploy_scheduled_task_list:
                self.task_queue.put([task, ProcessMode.stopScheduledTask])
            self.task_queue.join()

    def _stop_before_deploy(self):
        if len(self._unstopped_primary_stop_before_deploy_service_list()) > 0 \
                or len(self._unstopped_stop_before_deploy_service_list()) > 0:
            h1("Step: Stop ECS Service Before Deploy")
            for service in self._unstopped_primary_stop_before_deploy_service_list():
                self.task_queue.put([service, ProcessMode.stopBeforeDeploy])
            for service in self._unstopped_stop_before_deploy_service_list():
                self.task_queue.put([service, ProcessMode.stopBeforeDeploy])
            self.task_queue.join()
            h2("Wait for Service Status 'Stable'")
            self._wait_for_stable(self._unstopped_primary_stop_before_deploy_service_list())
            self._wait_for_stable(self._unstopped_stop_before_deploy_service_list())

    def _start_after_deploy(self):
        if len(self.primary_stop_before_deploy_service_list) > 0:
            h1("Step: Start Primary ECS Service After Deploy")
            for service in self.primary_stop_before_deploy_service_list:
                self.task_queue.put([service, ProcessMode.deployService])
            self.task_queue.join()
            h2("Wait for Service Status 'Stable'")
            self._wait_for_stable(self.primary_stop_before_deploy_service_list)
        if len(self.stop_before_deploy_service_list) > 0:
            h1("Step: Start ECS Service After Deploy")
            for service in self.stop_before_deploy_service_list:
                self.task_queue.put([service, ProcessMode.deployService])
            self.task_queue.join()
            h2("Wait for Service Status 'Stable'")
            self._wait_for_stable(self.stop_before_deploy_service_list)

    def _deploy_scheduled_task(self):
        if len(self.deploy_scheduled_task_list) > 0:
            h1("Step: Deploy ECS Scheduled Task")
            for task in self.deploy_scheduled_task_list:
                self.task_queue.put([task, ProcessMode.deployScheduledTask])
            self.task_queue.join()

    def _delete_unused(self, dry_run=False):
        if dry_run:
            h1("Step: Check Delete Unused")
        else:
            h1("Step: Delete Unused")
        if not self.is_delete_unused_service:
            info("Do not delete unused")
            return
        if len(self.delete_service_list) == 0 and len(self.delete_scheduled_task_list) == 0:
            info("There was no service or task to delete.")
        for delete_service in self.delete_service_list:
            success("Delete service '{delete_service.service_name}'".format(delete_service=delete_service))
            if not dry_run:
                self.awsutils.delete_service(delete_service.cluster_name, delete_service.service_name)
        for delete_scheduled_task in self.delete_scheduled_task_list:
            success("Delete scheduled task '{delete_scheduled_task.name}'"
                    .format(delete_scheduled_task=delete_scheduled_task))
            if not dry_run:
                self.awsutils.delete_scheduled_task(
                    name=delete_scheduled_task.name,
                    target_arn=delete_scheduled_task.task_environment.target_lambda_arn
                )

    def _fetch_ecs_information(self, is_all=False):
        h1("Step: Fetch ECS Information")
        describe_service_list = []
        if len(self.all_service_list) > 0 or is_all:
            describe_service_list = ecs.service.fetch_aws_service(
                cluster_list=self.cluster_list, awsutils=self.awsutils
            )
            for s in describe_service_list:
                self.task_queue.put([s, ProcessMode.fetchServices])
        cloud_watch_rule_list = []
        if len(self.scheduled_task_list) > 0 or is_all:
            rules = self.awsutils.list_cloudwatch_event_rules()
            for r in rules:
                if r.get('Description') == scheduled_task_managed_description:
                    c = CloudwatchEventRule(r)
                    cloud_watch_rule_list.append(c)
                    self.task_queue.put([c, ProcessMode.fetchCloudwatchEvents])
        while self.task_queue.qsize() > 0:
            print('.', end='', flush=True)
            time.sleep(3)
        self.task_queue.join()
        info("")

        # set service description and get delete servicelist
        for describe_service in describe_service_list:
            if self.environment != describe_service.task_environment.environment:
                continue
            if self.template_group is not None:
                if self.template_group != describe_service.task_environment.template_group:
                    continue
            is_delete = True
            for service in self.all_service_list:
                if service.service_name == describe_service.service_name:
                    if service.task_environment.cluster_name == describe_service.cluster_name:
                        service.set_from_describe_service(describe_service=describe_service)
                        is_delete = False
                        break
            if is_delete:
                self.delete_service_list.append(describe_service)
        for cloud_watch_rule in cloud_watch_rule_list:
            if self.environment != cloud_watch_rule.task_environment.environment:
                continue
            if self.template_group is not None:
                if self.template_group != cloud_watch_rule.task_environment.template_group:
                    continue
            is_delete = True
            for scheduled_task in self.scheduled_task_list:
                if scheduled_task.family == cloud_watch_rule.family:
                    scheduled_task.set_from_cloudwatch_event_rule(cloud_watch_rule)
                    is_delete = False
                    break
            if is_delete:
                self.delete_scheduled_task_list.append(cloud_watch_rule)
        success("Check succeeded")

    def _deploy_service(self):
        if len(self.primary_deploy_service_list) > 0:
            h1("Step: Deploy Primary ECS Service")
            for service in self.primary_deploy_service_list:
                self.task_queue.put([service, ProcessMode.deployService])
            self.task_queue.join()
            h2("Wait for Service Status 'Stable'")
            self._wait_for_stable(self.primary_deploy_service_list)
        if len(self.remain_deploy_service_list) > 0:
            h1("Step: Deploy ECS Service")
            for service in self.remain_deploy_service_list:
                self.task_queue.put([service, ProcessMode.deployService])
            self.task_queue.join()
            h2("Wait for Service Status 'Stable'")
            self._wait_for_stable(self.remain_deploy_service_list)

    def _check_deploy(self):
        h1("Step: Check Deploy ECS Service and Scheduled tasks")
        for service in self.all_deploy_target_service_list:
            self.task_queue.put([service, ProcessMode.checkDeployService])
        for scheduled_task in self.deploy_scheduled_task_list:
            self.task_queue.put([scheduled_task, ProcessMode.checkDeployScheduledTask])
        self.task_queue.join()

    def _wait_for_stable(self, service_list: list):
        if len(service_list) > 0:
            for service in service_list:
                self.task_queue.put([service, ProcessMode.waitForStable])
        self.task_queue.join()

    def _result_check(self):
        error_service_list = list(filter(
            lambda service: service.status == ProcessStatus.error, self.all_deploy_target_service_list
        ))
        error_scheduled_task_list = list(filter(
            lambda task: task.status == ProcessStatus.error, self.deploy_scheduled_task_list
        ))
        # Treat the deploy as failed if there is even a single error
        if len(error_service_list) > 0 or len(error_scheduled_task_list) > 0:
            sys.exit(1)
        if self.error:
            sys.exit(1)
Example #28
    def run_inputs(self, inputs, print_progress=True):
        """
        Process SCAPS run parameters in parallel. Takes in a dictionary of inputs, structured as
        {'id1':run_params_1, 'id2':run_params_2, ...}
        where run_params_1, run_params_2, ... should be the argument to the pre-specified input processor method.

        Returns a dictionary structured as
        {'id1':output_1, 'id2':output_2, ...}
        where output_1, output_2, ... are the objects returned by the pre-specified output processor method
        """

        inq = Queue()
        outq = Queue()
        output_dict = {}
        num_total = len(inputs.keys())
        num_done = 0

        proc_list = []
        config_all = {'SCAPS_ROOT':self.SCAPS_ROOT, 'SCAPS_CMD':self.SCAPS_CMD,
                      'SCAPS_EXEC_DIR':self.scaps_exec_dir, 'INPUT_PROC':self.input_processor,
                      'OUTPUT_PROC':self.output_processor}
        for proc_i in range(self.ncores):
            config_proc = deepcopy(config_all)
            config_proc['CORE'] = proc_i
            proc = Process(target=SCAPSrunner.run_process, args=(config_proc, inq, outq))
            proc.start()
            proc_list.append(proc)

        inputiter = iter(inputs.items())
        while True:
            running = any(proc.is_alive() for proc in proc_list)
            if not running:
                break
            while inq.empty():
                try:
                    (id, input) = next(inputiter)
                    inq.put({'id': id, 'calc_param': input})
                except StopIteration:
                    inq.put({'id': 'done'})

            while not outq.empty():
                pt = outq.get()
                output_dict[pt['id']] = pt['output']
                num_done += 1
                if print_progress:
                    print("Finished input ID{} [{}/{} total]".format(pt['id'], num_done, num_total))

        for proc_i, proc in enumerate(proc_list):
            proc.join()

        # Garbage collect: drain anything left on the queues
        while not inq.empty():
            inq.get()
        while not outq.empty():
            outq.get()
        # Give queues time to close
        time.sleep(5)

        if not (set(inputs.keys())==set(output_dict.keys())):
            print("Warning: Not all inputs seem to have gotten outputs")

        return output_dict
Example #29
def main_no_coro(q: mp.Queue):
    run()
    q.put(None)
    q.join()
Example #30
    import queue  # cannot be used to pass data between processes
    (1) from multiprocessing import Queue   use Queue to solve the producer/consumer model
       The queue is (process-)safe.
       q = Queue(num)
       num : the maximum length of the queue
       q.get()  # blocks waiting for data: if data is available it is returned at once, otherwise the call blocks
       q.put()  # blocks: if the queue can still take data it is put at once, otherwise the call blocks until there is room

       q.get_nowait()  # does not block: returns data if available, otherwise raises an exception
       q.put_nowait()  # does not block: puts the data if there is room, otherwise raises an exception

    (2) from multiprocessing import JoinableQueue  # a joinable queue
       JoinableQueue inherits from Queue, so all the Queue methods are available,
       and JoinableQueue adds two more methods:
       q.join()  # used by the producer: waits on the signals sent by q.task_done(); through them the producer knows how many items the consumer has processed
       q.task_done()  # used by the consumer: each time an item from the queue is consumed, it sends a signal back to join()

     2 Pipes (for awareness)
         from multiprocessing import Pipe
         con1, con2 = Pipe()
         Pipes are not safe.
         A pipe is one way for processes to communicate with each other.
         In a single process, if con1 receives data then con2 sends it,
                              and if con1 sends data then con2 receives it.

         Across processes, if the parent receives on con1, the child must send on con2;
                           if the parent sends on con1, the child must receive on con2;
                           if the parent receives on con2, the child must send on con1;
                           if the parent sends on con2, the child must receive on con1.
         A well-known pipe error is EOFError: if the parent closes the sending end while the child keeps trying to receive, EOFError is raised.
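A minimal runnable sketch tying these notes together — a JoinableQueue producer/consumer plus a one-shot Pipe exchange (the item labels are illustrative only):

from multiprocessing import Process, JoinableQueue, Pipe


def producer(q):
    for i in range(3):
        q.put('item%s' % i)
    # Block until the consumer has called task_done() once per item
    q.join()


def consumer(q):
    while True:
        item = q.get()
        print('consumed', item)
        q.task_done()  # notify the producer that this item has been handled


if __name__ == '__main__':
    q = JoinableQueue()
    c = Process(target=consumer, args=(q,))
    c.daemon = True  # the consumer dies with the main process once q.join() returns
    c.start()
    producer(q)

    # Pipe: two connection endpoints; whichever end sends, the other end receives
    con1, con2 = Pipe()
    con1.send('hello')
    print(con2.recv())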
Example #31
class PlasmaShmQueue:
    def __init__(self, maxsize: int = 0):
        r"""
        Use pyarrow in-memory plasma store to implement shared memory queue.

        Compared to native `multiprocess.Queue`, `PlasmaShmQueue` avoid pickle/unpickle
        and communication overhead, leading to better performance in multi-process
        application.

        :type maxsize: int
        :param maxsize: maximum size of the queue, `None` means no limit. (default: ``None``)
        """

        # Lazy start the plasma store manager
        global MGE_PLASMA_STORE_MANAGER
        if MGE_PLASMA_STORE_MANAGER is None:
            try:
                MGE_PLASMA_STORE_MANAGER = _PlasmaStoreManager()
            except Exception as e:
                err_info = (
                    "Please make sure pyarrow is installed correctly!\n"
                    "You can try reinstalling pyarrow and see if you can run "
                    "`plasma_store -s /tmp/mge_plasma_xxx -m 1000` normally.")
                raise RuntimeError(
                    "Exception happened in starting plasma_store: {}\n"
                    "Tips: {}".format(str(e), err_info))
        else:
            MGE_PLASMA_STORE_MANAGER.refcount += 1

        self.socket_name = MGE_PLASMA_STORE_MANAGER.socket_name

        # TODO: how to catch the exception happened in `plasma.connect`?
        self.client = None

        # Used to store the headers (ObjectIDs) for the data.
        self.queue = Queue(maxsize)  # type: Queue

    def put(self, data, block=True, timeout=None):
        if self.client is None:
            self.client = plasma.connect(self.socket_name)
        try:
            object_id = self.client.put(data)
        except plasma.PlasmaStoreFull:
            raise RuntimeError("plasma store out of memory!")
        try:
            self.queue.put(object_id, block, timeout)
        except queue.Full:
            self.client.delete([object_id])
            raise queue.Full

    def get(self, block=True, timeout=None):
        if self.client is None:
            self.client = plasma.connect(self.socket_name)
        object_id = self.queue.get(block, timeout)
        if not self.client.contains(object_id):
            raise RuntimeError(
                "ObjectID: {} not found in plasma store".format(object_id))
        data = self.client.get(object_id)
        self.client.delete([object_id])
        return data

    def qsize(self):
        return self.queue.qsize()

    def empty(self):
        return self.queue.empty()

    def join(self):
        self.queue.join()

    def disconnect_client(self):
        if self.client is not None:
            self.client.disconnect()

    def close(self):
        self.queue.close()
        self.disconnect_client()
        global MGE_PLASMA_STORE_MANAGER
        MGE_PLASMA_STORE_MANAGER.refcount -= 1
        _clear_plasma_store()

    def cancel_join_thread(self):
        self.queue.cancel_join_thread()
Example #32
        while True:
            time.sleep(6)
            #self.test.print_self()
            for sss in self.test.ooos:
                print('keep print {}'.format(sss.status))


if __name__ == '__main__':

    current = test('aaa', 'bbb', 'ccc')

    qqq = Queue(10)

    for aa in current.ooos:
        qqq.put(aa)
    #qqq.put(None)
    threads = []
    gg = keep_print(current)
    gg.start()

    for i in range(3):
        tt = update_thread(qqq)
        tt.start()
        threads.append(tt)

    for ts in threads:
        ts.join()
    #gg.join()

    qqq.join()

    print("finished")