Code example #1
def test_sample_out_queue_full_on_sample():
    """When out queue is full samples should be ignored without blocking."""
    dir_name = os.path.dirname(os.path.abspath(__file__))
    source_file = os.path.join(dir_name, '../ai/person.jpg')
    abs_path = os.path.abspath(source_file)
    source_uri = pathlib.Path(abs_path).as_uri()
    _gst_out_queue = Queue(1)
    last_in = 'only one sample allowed in this queue'
    _gst_out_queue.put(last_in)
    assert _gst_out_queue.full()
    _gst_stop_signal = threading.Event()
    _gst_eos_reached = threading.Event()
    svc = _TestGstService4(source_conf={
        'uri': source_uri,
        'type': 'image'
    },
                           out_queue=_gst_out_queue,
                           stop_signal=_gst_stop_signal,
                           eos_reached=_gst_eos_reached)
    svc.run()
    print('Gst service started. Waiting for a sample.')
    _gst_eos_reached.wait(timeout=2)
    assert _gst_eos_reached.is_set()
    assert _gst_out_queue.full()
    first_out = _gst_out_queue.get(timeout=1)
    assert first_out == last_in
Code example #2
def basic_usage():
    q = Queue(3)
    print("q.empty(): %s" % q.empty())
    q.put("message1")
    q.put("message2")
    print("q.full(): %s" % q.full())
    q.put("message3")
    print("q.full(): %s" % q.full())

    try:
        q.put("message4", True, 2)
    except Exception as e:
        print(e)
        print("queue已经满了:%s" % q.qsize())

    try:
        q.put_nowait("message5")
    except Exception as e:
        print(e)
        print("queue已经满了:%s" % q.qsize())

    print(q.get())

    if not q.full():
        q.put("message7")
Code example #3
File: batch_queue.py Project: saicoco/_practice
class prefetch_queue(object):

    def __init__(self, batch_size, data_dir, phase=True):
        self.producer = tool.av_generator(batch_size, data_dir, train=phase)
        self.queue = Queue(5)

    def produce(self):
        if not self.queue.full():
            self.queue.put(self.producer.next())
        else:
            pass

    def samples(self):
        if not self.queue.empty():
            item = self.queue.get()
            return item

    def ini_queue(self):
        while not self.queue.full():
            print '....'
            self.produce()

    def next(self):
        self.produce()
        return self.samples()
Code example #4
File: test_worker.py Project: spulec/PyQS
def test_worker_processes_shuts_down_after_processing_its_maximum_number_of_messages():
    """
    Test worker processes shutdown after processing maximum number of messages
    """
    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Build the SQS Message
    message_body = {"task": "tests.tasks.index_incrementer", "args": [], "kwargs": {"message": 23}}
    message = Message()
    body = json.dumps(message_body)
    message.set_body(body)

    # Add message to internal queue
    internal_queue = Queue(3)
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})
    internal_queue.put({"queue": queue.id, "message": message, "start_time": time.time(), "timeout": 30})

    # When I Process messages
    worker = ProcessWorker(internal_queue)
    worker._messages_to_process_before_shutdown = 2

    # Then I return from run()
    worker.run().should.be.none

    # With messages still on the queue
    internal_queue.empty().should.be.false
    internal_queue.full().should.be.false
Code example #5
def test_worker_processes_shuts_down_after_processing_its_max_number_of_msgs(
        os):
    """
    Test worker processes shutdown after processing maximum number of messages
    """
    os.getppid.return_value = 1

    # Setup SQS Queue
    conn = boto3.client('sqs', region_name='us-east-1')
    queue_url = conn.create_queue(QueueName="tester")['QueueUrl']

    # Build the SQS Message
    message = {
        'Body':
        json.dumps({
            'task': 'tests.tasks.index_incrementer',
            'args': [],
            'kwargs': {
                'message': 23,
            },
        }),
        "ReceiptHandle":
        "receipt-1234",
        "MessageId":
        "message-id-1",
    }

    # Add message to internal queue
    internal_queue = Queue(3)
    internal_queue.put({
        "queue": queue_url,
        "message": message,
        "start_time": time.time(),
        "timeout": 30,
    })
    internal_queue.put({
        "queue": queue_url,
        "message": message,
        "start_time": time.time(),
        "timeout": 30,
    })
    internal_queue.put({
        "queue": queue_url,
        "message": message,
        "start_time": time.time(),
        "timeout": 30,
    })

    # When I Process messages
    worker = ProcessWorker(internal_queue, INTERVAL, parent_id=1)
    worker._messages_to_process_before_shutdown = 2

    # Then I return from run()
    worker.run().should.be.none

    # With messages still on the queue
    internal_queue.empty().should.be.false
    internal_queue.full().should.be.false
Code example #6
File: test_worker.py Project: spulec/PyQS
def test_worker_processes_shuts_down_after_processing_its_max_number_of_msgs():
    """
    Test worker processes shutdown after processing maximum number of messages
    """
    # Setup SQS Queue
    conn = boto3.client('sqs', region_name='us-east-1')
    queue_url = conn.create_queue(QueueName="tester")['QueueUrl']

    # Build the SQS Message
    message = {
        'Body': json.dumps({
            'task': 'tests.tasks.index_incrementer',
            'args': [],
            'kwargs': {
                'message': 23,
            },
        }),
        "ReceiptHandle": "receipt-1234",
    }

    # Add message to internal queue
    internal_queue = Queue(3)
    internal_queue.put(
        {
            "queue": queue_url,
            "message": message,
            "start_time": time.time(),
            "timeout": 30,
        }
    )
    internal_queue.put(
        {
            "queue": queue_url,
            "message": message,
            "start_time": time.time(),
            "timeout": 30,
        }
    )
    internal_queue.put(
        {
            "queue": queue_url,
            "message": message,
            "start_time": time.time(),
            "timeout": 30,
        }
    )

    # When I Process messages
    worker = ProcessWorker(internal_queue, INTERVAL)
    worker._messages_to_process_before_shutdown = 2

    # Then I return from run()
    worker.run().should.be.none

    # With messages still on the queue
    internal_queue.empty().should.be.false
    internal_queue.full().should.be.false
Code example #7
def base_trial_protocol_run(trial_q: mp.Queue, condition_q: mp.Queue,
                            success_q: mp.Queue, stimulation_name):
    """
    The function to use in ProtocolProcess class
    Designed to be run continuously alongside the main loop
    Three parameters are three mp.Queue classes, each passes corresponding values
    :param trial_q: the protocol name (inwards); dict of trial from respective experiment
    :param success_q: the result of each protocol (outwards)
    :param condition_q: collects trigger results from trial trigger
    :param stimulus_name: exact name of stimulus function in base.stimulation.py
    """
    current_trial = None
    stimulation = setup_stimulation(stimulation_name)
    # starting the main loop without any protocol running
    while True:
        if trial_q.empty() and current_trial is None:
            pass
        elif trial_q.full():
            current_trial = trial_q.get()
            finished_trial = False
            # starting timers
            current_trial["stimulus_timer"].start()
            current_trial["success_timer"].start()
            print("Starting protocol {}".format(current_trial))
            condition_list = []
            # this branch is for already running protocol
        elif current_trial is not None:
            # checking for stimulus timer and outputting correct image
            if current_trial["stimulus_timer"].check_timer():
                # if stimulus timer is running, show stimulus
                stimulation.start()
            else:
                # if the timer runs out, finish protocol and reset timer
                stimulation.stop()
                current_trial["stimulus_timer"].reset()
                current_trial = None

            # checking if any condition was passed
            if condition_q.full():
                stimulus_condition = condition_q.get()
                # checking if timer for condition is running and condition=True
                if current_trial["success_timer"].check_timer():
                    condition_list.append(stimulus_condition)

            # checking if the timer for condition has run out
            if not current_trial["success_timer"].check_timer(
            ) and not finished_trial:
                # resetting the timer
                print("Timer for condition run out")
                finished_trial = True
                # outputting the result, whatever it is
                success = current_trial["result_func"](condition_list)
                success_q.put(success)
                current_trial["success_timer"].reset()
Code example #8
def test_worker_processes_shuts_down_after_processing_its_maximum_number_of_messages(
):
    """
    Test worker processes shutdown after processing maximum number of messages
    """
    # Setup SQS Queue
    conn = boto.connect_sqs()
    queue = conn.create_queue("tester")

    # Build the SQS Message
    message_body = {
        'task': 'tests.tasks.index_incrementer',
        'args': [],
        'kwargs': {
            'message': 23,
        },
    }
    message = Message()
    body = json.dumps(message_body)
    message.set_body(body)

    # Add message to internal queue
    internal_queue = Queue(3)
    internal_queue.put({
        "queue": queue.id,
        "message": message,
        "start_time": time.time(),
        "timeout": 30
    })
    internal_queue.put({
        "queue": queue.id,
        "message": message,
        "start_time": time.time(),
        "timeout": 30
    })
    internal_queue.put({
        "queue": queue.id,
        "message": message,
        "start_time": time.time(),
        "timeout": 30
    })

    # When I Process messages
    worker = ProcessWorker(internal_queue, INTERVAL)
    worker._messages_to_process_before_shutdown = 2

    # Then I return from run()
    worker.run().should.be.none

    # With messages still on the queue
    internal_queue.empty().should.be.false
    internal_queue.full().should.be.false
Code example #9
File: brokest.py Project: chiwhalee/brokest
    def start_with_return(self):
        """
            Start listening for tasks.
            用 multiprocessing.Queue 来实现并行化, 这样并行是async的。
            buff is 进程池
        """
        self.socket.bind('tcp://{}:{}'.format(self.host, self.port))
        buff_size = 3
        buff = Queue(buff_size)  
        count = 0
        while True:
            # todo: decide availability by whether the CPU is idle instead of buff.full()
            #querry = self.socket.recv_pyobj()
            recv_dict = self.socket.recv_pyobj() 
            header = recv_dict['header']
            
            if not buff.full():
                #if querry == 'querry' : 
                if header == 'querry':  
                    if not buff.full(): 
                        self.socket.send_pyobj('available') 
                    else: 
                        self.socket.send_pyobj('not available') 
                        self.is_available = False 
                        time.sleep(1)
                    #temp = self.socket.recv_pyobj()
                    #runnable_string, args, kwargs = temp  
                elif header == 'run': 
                    runnable_string = recv_dict['runnable_string']
                    runnable = pickle.loads(runnable_string)
                    args = recv_dict['args']
                    kwargs = recv_dict['kwargs']
                    

                    #args= pickle.loads(args)
                    count += 1
                    if self.info>0: 
                        #print 'put in queue count %d'%(count, )
                        print 'put in queue count %d port=%d'%(count, self.port)
                    #buff.put(count)
                    #p=Process(target=run_one_async, args=(runnable, buff, args, kwargs))
                    p=Process(target=run_one_with_return, args=(runnable, buff, args, kwargs))
                    p.start()
                    p.join()   #Block the calling thread until the process whose join() method is called terminates or until the optional timeout occurs.
                    res = buff.get()
                    self.socket.send_pyobj(res) 
                elif header == 'stop':  
                    self.socket.send_pyobj('stop server') 
                    break 
Code example #10
def video_capture(src, q_frame:Queue, q_image:Queue):
    #VideoCapture
    video = cv2.VideoCapture(int(src))

    
    while True:
        s = time.time()
        hasFrame, frame = video.read()



        assert hasFrame, 'read video frame failed.'
        # print('capture read used {} ms.'.format((time.time() - s) * 1000))


        s = time.time()
        image = cv2.resize(frame, (368, 368))
        # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # color handling TBD
        # print('capture resize used {} ms.'.format((time.time() - s) * 1000))
        # frameWidth = frame.shape[1]
        # frameHeight = frame.shape[0]



        s = time.time()
        if q_frame.empty():
            q_frame.put(frame)
        
        if q_image.full():
            print("q_img is full")
            continue
        else:
            q_image.put(image)
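
The capture loop above drops the newest frame when q_image is full. A hedged sketch (hypothetical helper) of the opposite policy, which keeps only the freshest frame by displacing the oldest one:

import queue

def put_latest(q, frame):
    """Keep only the freshest frame: discard the oldest item when the queue is full."""
    if q.full():
        try:
            q.get_nowait()        # make room by dropping the stale frame
        except queue.Empty:
            pass                   # a consumer emptied it first
    try:
        q.put_nowait(frame)
    except queue.Full:
        pass                       # raced with another producer; drop this frame
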
Code example #11
class MovingAverage:
    def __init__(self):
        self.myQ = Queue(maxsize=60)
        self.sumElements = 0.0
        self.mean = 0.0
        self.std = 4.5
    @staticmethod
    def magnitude(x, y, z):
        return math.sqrt(x*x + y*y + z*z)

    def addElement(self, element):
        oldElement = 0
        if (self.myQ.full()):
            oldElement = self.myQ.get()  # drop the oldest element
        self.myQ.put(element)  # append the newest element
        self.sumElements = element + self.sumElements - oldElement  # update the running sum
        self.mean = self.sumElements/self.myQ.qsize()

    def getStandardDeviation(self):
        #if (not self.myQ.full()): # if the queue is full, we want to calculate std
        #    return -1 # queue must be full
        #else:
        #    if (not self.myQ.empty()): # We know the queue is full and never empty
        index = self.myQ.qsize()
        if (index > 1):    
            var = 0.0
            for i in range(index):
                element = self.myQ.get()
                var = var+math.pow((element - self.mean),2)
                self.myQ.put(element)
            var = var/(self.myQ.qsize()-1.0)
            self.std = math.sqrt(var)
            return self.std
        return -1
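
The rolling mean and standard deviation above can also be kept with collections.deque, which evicts the oldest sample automatically; a hypothetical sketch for comparison:

import math
from collections import deque

class DequeMovingAverage:
    """Hypothetical deque-based variant of the rolling-window statistics above."""
    def __init__(self, size=60):
        self.window = deque(maxlen=size)

    def addElement(self, element):
        self.window.append(element)  # the oldest sample falls out once the window is full

    @property
    def mean(self):
        return sum(self.window) / len(self.window)

    def getStandardDeviation(self):
        n = len(self.window)
        if n <= 1:
            return -1
        m = self.mean
        return math.sqrt(sum((x - m) ** 2 for x in self.window) / (n - 1.0))
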
Code example #12
def generate_routes():
    logging.info('Start of route generation')
    number_of_processes = 8
    route_queue = Queue(maxsize=20000)
    sql = 'SELECT id, start_point, end_point FROM de_sim_routes'
    threading.Thread(target=_queue_feeder, args=(sql, route_queue, 20000, number_of_processes)).start()

    with connection.get_connection() as conn:
        cur = conn.cursor()
        cur.execute('SELECT COUNT(id) FROM de_sim_routes')  # execute 1.7 Secs
        rec = cur.fetchone()
        counter = Counter(rec[0])

    while not route_queue.full():
        time.sleep(0.2)

    start = time.time()
    processes = []
    for i in range(number_of_processes):
        p = ProcessRouteCalculation(route_queue, counter)
        processes.append(p)
        processes[-1].start()

    for p in processes:
        p.join()

    end = time.time()
    logging.info('Runtime Route Generation: %s', (end - start))
Code example #13
  def run(self, count=Arbitrary.TEST_COUNT):
    print('start test.')

    if self.process > 1:
      from multiprocessing import Queue
    else:
      from queue import Queue

    runner = PropRunner(count)
    queue = Queue(maxsize=len(PyQCheck.TEST_STEP))
    if self.process > 1:
      # multi process
      PyQWorker().set([
        Process(target=runner.run, args=(test,), kwargs={"queue": queue})
        for test in PyQCheck.TEST_STEP
      ]).start(self.process)
    else:
      # linear
      for test in PyQCheck.TEST_STEP:
        runner.run(test, queue=queue)

    length = len(PyQCheck.TEST_STEP)
    while True:
      if queue.full():
        print('finish.')
        for i in range(length):
          self.results.append(queue.get())
        return self
Code example #14
def simple_protocol_run(trial_q: mp.Queue, success_q: mp.Queue, trials: dict):
    """
    The function to use in ProtocolProcess class
    Designed to be run continuously alongside the main loop
    Three parameters are three mp.Queue classes, each passes corresponding values
    :param trial_q: the protocol name (inwards)
    :param success_q: the result of each protocol (outwards)
    :param trials: dict of possible trials
    """
    current_trial = None
    # starting the main loop without any protocol running
    while True:
        if trial_q.empty() and current_trial is None:
            pass
        elif trial_q.full():
            current_trial = trial_q.get()
            print(current_trial)
            # this branch is for already running protocol
        elif current_trial is not None:
            print("Stimulating...")
            current_trial = None
            success_q.put(True)
            deliver_liqreward()
            time.sleep(3.5)
            deliver_liqreward()
Code example #15
class Q:
    def __init__(self, qsize):
        self.q = Queue(qsize)

    def write(self, v):
        try:
            self.q.put_nowait(v)
        except queue.Full:
            pass

    def read(self):
        try:
            d = self.q.get_nowait()
            return d
        except queue.Empty:
            return None

    def empty(self):
        return self.q.empty()

    def full(self):
        return self.q.full()

    def qsize(self):
        return self.q.qsize()
Code example #16
class Am:
    def __init__(self, queue_lens):
        self.queue = Queue(queue_lens)
        self.cnt = 0

    def put(self):
        print 'start put'
        while True:
            if self.queue.full():
                print 'queue is full'
                time.sleep(0.5)
                continue
            else:
                self.queue.put(self.cnt)
                self.cnt += 1
                time.sleep(0.1)

    def get(self):
        print 'start get'
        while True:
            if self.queue.empty():
                print 'queue is empty'
                time.sleep(0.1)
            else:
                print self.queue.get()
                # print self.cnt
                time.sleep(1)
Code example #17
File: client.py Project: kyehyukahn/scp-prototype
def send_message_multiple(message, *endpoints, same_message_id=False):
    message_id = None
    if same_message_id is True:
        message_id = get_uuid()

    if message_id is not None and message is not None:
        message.message_id = message_id

    q = Queue(maxsize=len(endpoints))
    for endpoint in endpoints:
        p = Process(
            target=_send_message_multiple,
            args=(q, message, endpoint),
            kwargs=dict(message_id=message_id),
        )
        p.start()

    while not q.full():
        pass

    messages = list()
    for i in endpoints:
        messages.extend(map(lambda x: (x, i), q.get()))

    return messages
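
send_message_multiple above spins on `while not q.full(): pass`, which burns a CPU core until every worker has reported. A self-contained sketch (hypothetical names) that blocks on q.get() instead, collecting exactly one result per worker:

from multiprocessing import Process, Queue

def _worker(q, value):
    q.put(value * 10)            # stand-in for the real per-endpoint work

def gather(values):
    """Collect one result per worker without busy-waiting on q.full()."""
    q = Queue(maxsize=len(values))
    procs = [Process(target=_worker, args=(q, v)) for v in values]
    for p in procs:
        p.start()
    results = [q.get() for _ in values]   # each get blocks until a worker has produced
    for p in procs:
        p.join()
    return results

if __name__ == '__main__':
    print(sorted(gather([1, 2, 3])))      # [10, 20, 30]
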
Code example #18
def basic_usage():
    q = Queue(3)  # specify the queue size; unlimited by default if omitted
    q.put('message 1')
    q.put('message 2')
    q.put('message 3')
    # q.put('message 4')  # would block until space becomes available
    if not q.full():
        q.put('message 5', block=True, timeout=1)  # wait up to 1s; raise if the put still fails
    print('Is the queue full: %s' % q.full())
    print(q.get())  # get and remove an item
    print(q.get())
    print(q.get())
    # print(q.get())  # would block until an item is available
    if not q.empty():
        print(q.get(block=True, timeout=1))  # wait up to 1s for an item; raise on timeout
    print('Is the queue empty: %s' % q.empty())
Code example #19
def main():
    tags_queue = Queue()
    unique_tags = set()

    reader_worker = Process(target=read_tags_worker, args=(tags_queue, ))

    logger.info("Starting Read Worker")
    reader_worker.start()
    try:
        while True:
            item = tags_queue.get()
            if item is None:
                break

            unique_tags.add(item)
    except KeyboardInterrupt:
        logger.info("Keyboard Interupt in Main Thread")
        logger.info("Joining Read Worker")
        reader_worker.join()
        logger.info("Emptying Queue")
        while tags_queue.full():
            item = tags_queue.get()
            if item is None:
                break

            unique_tags.add(item)
    finally:
        print(unique_tags)
        print(len(unique_tags))
        logger.info("Main Thread exiting")
        for handler in logger.handlers:
            handler.close()
            logger.removeFilter(handler)
Code example #20
File: bot_cctv11.py Project: I0T/EPGT
def main():
    run_queue = Queue(50)

    processes = [
        Process(target=run_task, args=(run_queue, )) for i in range(20)
    ]
    for p in processes:
        p.daemon = True
        p.start()

    signal.signal(signal.SIGTERM, SignalTERM)

    db = getDb()
    channel_list = db.query(
        'SELECT * FROM channel where code="cctv11" ORDER BY id').fetchall()
    channel_len = len(channel_list)
    i = 1
    for channel in channel_list:
        while run_queue.full():
            # if the task queue is full, wait
            time.sleep(3)
        config = channel['config']
        try:
            config = json.loads(config)
            run_queue.put({'code': channel['code'], 'config': config})
            print 'Queue: %s/%s' % (i, channel_len)
            i = i + 1
        except:
            print 'error'
            print config

    for i in range(20):
        run_queue.put("STOP")
    for p in processes:
        p.join()
Code example #21
File: bot-zjws.py Project: I0T/EPGT
def main():
    run_queue = Queue(50)
    
    processes = [Process(target=run_task, args=(run_queue,))
                     for i in range(20)]
    for p in processes:
        p.daemon = True
        p.start()
    
    signal.signal(signal.SIGTERM, SignalTERM)

    db = getDb()
    channel_list = db.query('SELECT * FROM channel where id = 833 ORDER BY id').fetchall()
    channel_len = len(channel_list)
    i = 1
    for channel in channel_list:
        while run_queue.full():
            # if the task queue is full, wait
            time.sleep(3)
        config = channel['config']
        try:
            config = json.loads(config)
            run_queue.put({'code': channel['code'], 'config': config, 'type': channel['type'], 'province': channel['province'], 'city': channel['city']})
            print 'Queue: %s/%s' % (i, channel_len)
            i = i + 1
        except:
            print 'error'
            print config

    for i in range(20):
        run_queue.put("STOP")
    for p in processes:
        p.join()
Code example #22
def video_capture(q_frame: Queue, q_image: Queue, flag):
    video = cv2.VideoCapture(0)
    print("video.isOpened()={}", video.isOpened())
    try:
        while True:
            if flag.value == 20:
                if video.isOpened():
                    video.release()
                    print("video release!")
                print("exit video_capture!")
                break
            s = time.time()
            ret, frame = video.read()
            assert ret, 'read video frame failed.'
            #print('capture read used {} ms.'.format((time.time() - s) * 1000))

            s = time.time()
            image = cv2.resize(frame, (416, 416))
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            #print('capture resize used {} ms.'.format((time.time() - s) * 1000))

            s = time.time()
            if q_frame.empty():
                q_frame.put(frame)
            if q_image.full():
                continue
            else:
                q_image.put(image)
            #print("capture put to queue used {} ms".format((time.time()-s)*1000))
    except KeyboardInterrupt:
        video.release()
        print("exit video_capture!")
Code example #23
File: __init__.py Project: stat-ml/ncvis-examples
 def run(self, processes=None, progress=True, delay=0.2):
     tasks = Queue()
     size = len(self.tasks)
     results = Queue(maxsize=size)
     
     def init():
         worker = self.worker_class(*self.worker_args, **self.worker_kwargs)
         while True:
             t = tasks.get()
             results.put(worker(t))
     
     def load_queue():
         for t in self.tasks:
             tasks.put(t)
     p = Process(target=load_queue)
     p.start()
       
     pool = Pool(processes=processes, initializer=init)
     if progress:
         with progressbar.ProgressBar(max_value=size, prefix=self.message) as bar:
             while not results.full():
                 bar.update(results.qsize())
                 time.sleep(delay)
     
     res = [results.get() for i in range(size)]
     
     p.terminate()
     pool.terminate()
     return [r for r in res if r is not None]
Code example #24
class QueueWrapper(object):
    def __init__(self):
        self.queue = Queue()
        self.container = set()
        self.lock_container = Lock()

    def put(self, item):
        with self.lock_container:
            for elem in self.container:
                if item[0] == elem[0]:
                    return False
            self.container.add(item)
            self.queue.put(item)
            return True

    def get(self):
        item = self.queue.get()
        with self.lock_container:
            self.container.remove(item)
        return item

    def qsize(self):
        return self.queue.qsize()

    def empty(self):
        return self.queue.empty()

    def full(self):
        return self.queue.full()
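
A brief usage sketch for a wrapper like QueueWrapper above, assuming it is backed by `from multiprocessing import Queue, Lock`; items are keyed on their first element, so a second item with the same key is rejected until the first is consumed:

qw = QueueWrapper()
print(qw.put(('job-1', 'payload-a')))   # True: new key accepted
print(qw.put(('job-1', 'payload-b')))   # False: 'job-1' is already queued
print(qw.get())                          # ('job-1', 'payload-a')
print(qw.put(('job-1', 'payload-b')))   # True again: the key was released by get()
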
Code example #25
def annotation_vcf(parsed_args, process_num):
    records, results = Queue(100 * process_num), Queue()
    input_finished = False
    output_finished = False
    wait_records = dict()
    processes = list()
    records_id = count()
    for i in range(process_num):
        p = Process(target=score_vcf,
                    args=(records, results, parsed_args.annotation))
        processes.append(p)
        p.start()
    vcf_reader = vcf.Reader(filename=parsed_args.file_in)
    vcf_reader.infos['dbscSNV'] = VcfInfo(
        'dbscSNV',
        vcf_field_counts['A'],
        'String',
        'dbscSNV Score for VCF record alleles, Format: ALLELE|ada_score|rf_score',
        version=None,
        source=None)
    vcf_writer = vcf.Writer(open(parsed_args.file_out, 'w'), vcf_reader)
    while True:
        while not records.full() and not input_finished:
            try:
                record = next(vcf_reader)
                record_id = next(records_id)
                wait_records[record_id] = record
                record_infos = list()
                chromosome = str(record.CHROM)
                pos = record.POS
                ref = record.REF
                for alt in record.ALT:
                    record_infos.append(
                        VariantRecord(chromosome, pos, ref, str(alt)))
                records.put((record_id, record_infos))
            except StopIteration:
                input_finished = True
                records.put('END')
                break
        processes_status = list()
        for p in processes:
            processes_status.append(p.is_alive())
        if True not in processes_status:
            results.put('END')
        while True:
            try:
                result = results.get(False)
            except queue.Empty:
                break
            if result != 'END':
                record_id, record_score = result[0], result[1]
                record_write = wait_records.pop(record_id)
                record_write.add_info('dbscSNV', record_score)
                vcf_writer.write_record(record_write)
            else:
                output_finished = True
                break
        if output_finished:
            break
    vcf_writer.close()
Code example #26
class BufferedVideoProcessor(object):
    """A multithreaded video processor that runs a series of
  functions on each frame from video_stream."""
    def __init__(self, video_stream, functions, buffer_capacity=12):
        self._video_stream = video_stream
        self._input_queue = Queue(buffer_capacity)

        # Build a chain of frame processors.
        last_queue = self._input_queue
        self._threads = []
        for func in functions:
            proc = FrameProcessor(func, last_queue)
            self._threads.append(proc)
            last_queue = proc.output_queue

        self._output_queue = last_queue

    def update(self,
               frame_processor_func=lambda frame: frame,
               max_frames=None):
        """Read frames, store in input queue, and run
    callback() on each frame in separate threads."""

        frames_processed = 0
        while not max_frames or frames_processed < max_frames:
            if not self._input_queue.full():
                frame, frame_idx = self._video_stream.captureFrame(
                    display=False)
                self._input_queue.put((frame_idx, frame))

        self._output_queue.join()
Code example #27
def parallel_annotate_to_bed(opts, trans_provided_no_acc, process_num):
    # annotate in parallel and write the results in BED format
    chrome_dic = generate_chrome_dic(opts.annotation)
    fp = create_annotate_result(opts.file_out)
    records, results = Queue(100 * process_num), Queue()
    input_finished = False
    output_finished = False
    processes = list()
    for i in range(process_num):
        p = Process(target=process_record_to_bed,
                    args=(records, results, opts.annotation,
                          trans_provided_no_acc, opts.how))
        processes.append(p)
        p.start()
    if opts.file_format == 'excel':
        df = pd.read_excel(opts.file_in, skiprows=opts.skip_rows)
    else:
        df = pd.read_csv(opts.file_in,
                         sep='\t',
                         low_memory=False,
                         skiprows=opts.skip_rows,
                         dtype={'#Chr': str})
    df.rename(columns={'#Chr': 'Chr'}, inplace=True)
    while True:
        while not records.full() and not input_finished:
            try:
                record = next(df.itertuples())
                chrome = getattr(record, 'Chr')
                start = getattr(record, 'Start')
                stop = getattr(record, 'Stop')
                ref = getattr(record, 'Ref')
                call = getattr(record, 'Call')
                var_type = judge_var_type(ref, call)
                record_parser = VariantRecord(chrome, start, stop, ref, call,
                                              var_type)
                g = generate_g(record_parser, chrome_dic)
                records.put((record_parser, g))
            except StopIteration:
                input_finished = True
                records.put('END')
                break
        processes_status = list()
        for p in processes:
            processes_status.append(p.is_alive())
        if True not in processes_status:
            results.put('END')
        while True:
            try:
                result = results.get(False)
            except queue.Empty:
                break
            if result != 'END':
                write_annotate_result(fp, result)
            else:
                output_finished = True
                break
        if output_finished:
            break
    fp.close()
Code example #28
class Manager(object):
    def __init__(self, workers=-1):
        self.cancelled = False
        self._workers = Queue(workers)
        self._queue = Queue()
        self._processes = []
        self._thread = mp.Process(target=self._run)

    def add_worker(self, worker):
        if self._workers.full():
            raise Exception('Worker pool full')
        self._workers.put_nowait(worker)

    def queue(self, conversation):
        self._queue.put(conversation)

    def has_pending(self):
        return not self._queue.empty()

    def start(self):
        self._thread.start()

    def stop(self):
        self.cancelled = True
        for process in self._processes:
            process.terminate()

        start = time()
        while self._thread.is_alive():
            if (time() - start) > 30:
                self._thread.terminate()
                break
            sleep(1)

    def work_complete(self, worker):
        self._workers.put_nowait(worker)

    def _run(self):
        while True:
            if self.cancelled:
                break
            try:
                worker = self._workers.get_nowait()
            except:
                print('No worker available.')
                sleep(1)
                break
            try:
                job = self._queue.get_nowait()
            except:
                print('No job available.')
                self._workers.put(worker)
                sleep(1)
                break

            process = mp.Process(target=worker.handle, args=(self, job,))
            self._processes.append(process)
            process.start()
Code example #29
class DataGenerator:
    def __init__(self):
        self.processes = []
        self.queue = Queue(10)
        self.threads_count = 4

    # A generator yielding an audio array, and indices and labels for
    # building a SparseTensor describing labels for CTC functions
    def seq_generator(self, seq_length, framerate, chunk, sr, mel_count):
        def do_work():
            while True:
                audio, labels = generate_seq(seq_length, framerate)
                audio = np.reshape(audio, (seq_length // chunk, chunk))
                # audio = (audio - np.mean(audio)) / np.std(audio) # Normalization
                audio = audio.astype(np.float32)
                mel = self.get_wave_mel_features(audio, sr, mel_count)
                labels = np.asarray([MORSE_CHR.index(l[0]) for l in labels])

                while self.queue.full():
                    time.sleep(0.0100)

                self.queue.put((audio, labels, mel))

        self.start_threads(do_work)

        while True:
            audio, labels, mel = self.queue.get()

            label_indices = []
            label_values = []
            for i, value in enumerate(labels):
                label_indices.append([i])
                label_values.append(value)

            yield (audio, label_indices, label_values, [len(labels)], mel)

    def start_threads(self, do_work):
        if len(self.processes) != self.threads_count:
            for _ in range(self.threads_count):
                p = Process(target=do_work)
                p.daemon = True
                self.processes.append(p)
                p.start()

    @staticmethod
    def get_wave_mel_features(wave, sr, mel_count):
        wave = wave.reshape(SEQ_LENGTH)
        wave = librosa.util.normalize(wave)
        mel = librosa.feature.melspectrogram(wave,
                                             sr=sr,
                                             n_fft=250,
                                             n_mels=mel_count,
                                             hop_length=200,
                                             power=2)
        mel = mel.T
        mel = mel / np.max(mel)
        # mel = np.round(mel, decimals=4)
        return mel
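
The worker above polls `while self.queue.full(): time.sleep(0.01)` before each put. A hedged alternative sketch (hypothetical helper): a blocking put with a timeout applies the same back-pressure without the polling loop:

import queue

def enqueue_sample(q, sample, timeout=1.0):
    """Block until there is room, but give up after `timeout` seconds."""
    try:
        q.put(sample, block=True, timeout=timeout)
        return True
    except queue.Full:
        return False
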
Code example #30
 def Start_Processing(self, queue_cisco: multiprocessing.Queue,
                      queue_topsec: multiprocessing.Queue):
     """
     :param self:
     :type self:
     :param queue_cisco:
     :type queue_cisco:
     :param queue_topsec:
     :type queue_topsec:
     """
     while True:
         if queue_cisco.full() and queue_topsec.full():
             df_cisco = queue_cisco.get()
             df_topsec = queue_topsec.get()
             self.dict_group = queue_topsec.get()
             try:
                 df_cisco_transfer = self.Process_DF_withGroupDict(
                     self, df_cisco, group_dict=self.dict_group)
                 df_topsec_transfer = self.Process_DF_withGroupDict(
                     self, df_topsec, group_dict=self.dict_group)
                 result_df = self.cisco_compare_topsec(
                     self,
                     df_cisco_transfer=df_cisco_transfer,
                     df_cisco=df_cisco,
                     df_topsec_transfer=df_topsec_transfer,
                     df_topsec=df_topsec)
                 result_df.to_csv(
                     config.default_config_dict["default"].result_file_name,
                     sep=',',
                     header=config.default_config_dict["default"].df_format,
                     index=True)
             except (TypeError, RuntimeError, NameError) as err:
                 print("error occur:" % err)
             finally:
                 config.Logger.complete_show("Successfully Run Comparison!")
                 config.Logger.complete_show(
                     "Please Check ./output/result.csv for details")
                 config.Logger.info_show(" ")
                 config.Logger.info_show(
                     "##################################################")
                 config.Logger.info_show(
                     "Thanks for your Usage... Any Questions Please Email: [email protected]"
                 )
                 config.Logger.info_show("Developed By DoHeras@June 2021")
                 break
Code example #31
        def conv_proc(wav_input, output_dict):
            PRE_FRAME_NUM = 5
            noise_energy = []
            for i in range(10):
                noise = queue.get(True).astype(np.float32)
                noise_energy.append(np.sum(noise * noise))
                #print(noise)
            avg = np.average(noise_energy)
            variance = np.var(noise_energy)
            print('Energy:', noise_energy)
            print('Avg:', avg)
            print('Var:', variance)
            threshold = avg + 10 * np.sqrt(variance)
            print("THRESHOLD =", threshold)
            leading_frame = Queue(PRE_FRAME_NUM)
            recording = False
            state = 0
            tailing_cnt = 0
            rec = []
            cnt = 0
            while True:
                wav = wav_input.get(True).astype(np.float32)
                energy = np.sum(np.square(wav))
                cnt += 1
                #if cnt % 10 == 0:
                #    print("CNT:", cnt, energy, energy - threshold)

                if energy > threshold:
                    #print("Fire")
                    if state < 4:
                        state += 1
                        #print('Pre')
                    else:
                        recording = True
                        print('Voice Stream Start')
                        state = 8
                else:
                    if state > 0:
                        state -= 1
                        #print("Post", state)
                    else:
                        #print("Finish")
                        if recording:
                            print("Voice Stream Paused")
                        rec.clear()
                        recording = False
                if recording:
                    while not leading_frame.empty():
                        mfcc = FeatureExtractors.mfcc_extractor(
                            leading_frame.get(True))
                        output_dict['mfcc'].put(mfcc)
                    mfcc = FeatureExtractors.mfcc_extractor(wav)
                    output_dict['mfcc'].put(mfcc)
                else:
                    if leading_frame.full():
                        leading_frame.get(True)
                    leading_frame.put(wav)
Code example #32
File: ex_5.py Project: UnLuckyAki/examples
def proc_func(my_queue: mp.Queue):
    for i in range(10):
        time.sleep(1)
        # put(obj[, block[, timeout]])
        if i <= 4 and not my_queue.full():
            my_queue.put(
                f"{i}: from {mp.current_process().name}")  # [block[, timeout]]
        if i == 4:
            my_queue.close()
Code example #33
File: test.py Project: fagan2888/iterable_queue
	def test_ProducerQueue_delegation(self):

		queue = Queue()
		producer_queue = ProducerQueue(queue)
		message = 'yo'
		queue_timeout = 0.1


		# The producer_queue does not implement `get()`
		queue.put(message)
		with self.assertRaises(NotImplementedError):
			producer_queue.get()

		with self.assertRaises(NotImplementedError):
			producer_queue.get_nowait()

		# The message is still on the queue
		self.assertTrue(queue.get(timeout=queue_timeout), message)

		# The producer_queue does implement `put`
		producer_queue.put(message)
		self.assertEqual(queue.get(), message)

		producer_queue.put_nowait(message)
		self.assertEqual(queue.get(), message)

		# Tests below are commented out because `get()` has been removed
		# from the implementation of ProducerQueue to preserve signaling and
		# management semantics.
		#
		# The next three assertions test that Empty is raised for various
		# kinds of `get` call
		#with self.assertRaises(Empty):
		#	producer_queue.get(timeout=queue_timeout)
		#
		#with self.assertRaises(Empty):
		#	producer_queue.get_nowait()
		#
		#with self.assertRaises(Empty):
		#	producer_queue.get(block=False)

		# The next three assertions check that calls which return information
		# return the same values when called on ProducerQueue as on Queue
		# The first test is a bit of an exception:
		# Queue.qsize() may raise a NotImplementedError, if so, just make
		# sure that ProducerQueue does too.  Otherwise, make sure they
		# return the same value.
		try:
			queue.qsize()
		except NotImplementedError:
			with self.assertRaises(NotImplementedError):
				producer_queue.qsize()
		else:
			self.assertEqual(producer_queue.qsize(), queue.qsize())

		self.assertEqual(producer_queue.empty(), queue.empty())
		self.assertEqual(producer_queue.full(), queue.full())
Code example #34
def download_from_web(queue: multiprocessing.Queue):
    data = [11, 22, 33, 44]
    for temp in data:
        if not queue.full():
            sleep(1)
            print('Producing data %s into the queue.' % str(temp))
            queue.put(temp)

    print('-- download finished and data placed on the queue --')
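
The producer above checks full() before each put but never tells the consumer when it is done. A self-contained producer/consumer sketch (hypothetical names) that pairs it with a None sentinel:

import multiprocessing

def produce(q):
    for item in [11, 22, 33, 44]:
        q.put(item)              # blocks if the queue is full, no full() check needed
    q.put(None)                  # sentinel: signal that production is finished

def consume(q):
    while True:
        item = q.get()
        if item is None:
            break
        print('consumed %s from the queue.' % item)

if __name__ == '__main__':
    q = multiprocessing.Queue(2)
    p = multiprocessing.Process(target=produce, args=(q,))
    c = multiprocessing.Process(target=consume, args=(q,))
    p.start(); c.start()
    p.join(); c.join()
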
Code example #35
class SerialManagerThread(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self._stopper = threading.Event()

        self.queue_read = Queue(maxsize=10)
        self.queue_write = Queue(maxsize=10)

        self.thread1 = None
        self.thread2 = None

    def stop(self):
        self._stopper.set()

    def stopped(self):
        return self._stopper.isSet()

    def open_com(self, portName, baudRate):
        self.ser = serial.Serial(portName, baudRate, timeout=3)
        self.thread1 = serialReadThread(1, "SERIAL_READ", self.ser,
                                        self.queue_read)
        self.thread2 = serialWriteThread(2, "SERIAL_WRITE", self.ser,
                                         self.queue_write)
        self.thread1.start()
        self.thread2.start()

    def close_com(self):
        if self.thread1 is not None:
            self.thread1.stop()
            self.thread1.join()
        if self.thread2 is not None:
            self.thread2.stop()
            self.thread2.join()

    def run(self):
        global queue_serial_receive, queue_serial_send, queue_serial_gui
        global serialcom

        while True:
            time.sleep(0.01)
            # get data from serial port
            if self.queue_read.empty() == False:
                sdata = self.queue_read.get(block=False)
                if queue_serial_receive.full() == False:
                    queue_serial_receive.put(sdata)
                if queue_serial_gui.full() == False:
                    queue_serial_gui.put(sdata)
            # send data to serial port
            if queue_serial_send.empty() == False:
                sdata = queue_serial_send.get(block=False)
                if self.queue_write.full() == False:
                    self.queue_write.put(sdata)

            if self.stopped():
                self.close_com()
                break
Code example #36
def ring_created(args, data, labels, queue_in:Queue, queue_out:Queue, mbs, Pmbs):
    """
    :param args:
        dataset
        verify-method
    :return:
    """
    """ init parameter"""
    N_pop=args['N_pop']

    """ init functions"""
    init_population = pg_rule_set
    crossover=gen_crossover_fun(args)
    mutation=gen_mutation_fun(args)
    selection = gen_selection_fun(args)
    P_m = args['P_m']
    """ init population """
    pop=init_population(args['init_N_pop'], labels, mbs, args['N_rule'], Pmbs, args)
    pop=selection(pop)
    """ begin evolution"""
    for g in range(args['stop_generation']):
        offsprings=[]
        b=time.time()
        while len(offsprings)!=N_pop:
            offspring=crossover(pop)
            offspring=mutation(offspring)
            offspring, rj_patterns, er_patterns = evaluate(offspring, labels, mbs, args)
            if offspring is None:
                continue
            if random.random() < P_m:
                # consider they are the same
                mr_patterns = rj_patterns + er_patterns
                offspring = simf_gbml(offspring, mr_patterns, Pmbs, labels, args, mbs)
                if offspring is None:
                    continue
            if inPop(offspring, pop):
                continue
            offsprings.append(offspring)
        pop.extend(offsprings)
        pop=selection(pop)[:args['N_pop']+args['migration_num']]
        try:
            ring_migraion(pop, args, g, queue_in, queue_out)
        except:
            pass
        pop=pop[:args['N_pop']]
        print('in main', time.time()-b)
        g+=1
    try:
        queue_out.put('begin')
        for i in pop:
            while queue_out.full():
                time.sleep(1e-3)
            queue_out.put(i)
        queue_out.put('end')
    except:
        pass
Code example #37
File: GameManager.py Project: wangdrew/e-foosball
class GameManager():
    event_handlers = []

    def __init__(self):
        self.arduino = None
        self.event_thread = None
        self.event_q = Queue(1)
        atexit.register(self.cleanup)
        self.register_event_handlers()

    def register_event_handlers(self):
        self.event_handlers = [SoundEventHandler()]  # only one handler for now

    def connect_to_arduino(self, serial_addr):
        try:
            self.arduino = serial.Serial(serial_addr,
                                         baudrate=9600,
                                         bytesize=serial.EIGHTBITS,
                                         parity=serial.PARITY_NONE,
                                         stopbits=serial.STOPBITS_ONE)

        except Exception as e:
            print("error opening serial connection")
            raise e

    def poll_serial(self, q):
        while True:
            ascii_line = self.arduino.readline()
            if "e:" in ascii_line:
                q.put(ascii_line[2:])

        
    def run(self):
        self.event_thread = Process(target=self.poll_serial, args=(self.event_q,))
        self.event_thread.start()

        while True:
            if self.event_q.full():
                event = self.event_q.get()
                for h in self.event_handlers:
                    h.process_event(event)


    def cleanup(self):
        try:
            if self.event_thread:
                self.event_thread.terminate()

            if self.arduino:
                self.arduino.close()

            for h in self.event_handlers:
                h.cleanup()

        except Exception as e:
            print("Cleanup exception: " + str(e))
Code example #38
File: sources.py Project: alexoneill/TerraMotus
class DataSource(threading.Thread):
  """
  A class to represent a stream of data that supplies a 2D array of data.

  The class has a few standard methods which are intended to be over-written,
  all of which are methods from the super class, Thread. It is intended to be
  run in parallel to simulation, and it has a Queue to safely access the data
  that is created from the class.

  This class is intended to be fed to the Converter class.
  """

  def __init__(self, queueMax = 1):
    """
    Init the DataSource as a Thread with a Queue.

    Keyword Arguments:
    queueMax -- The maximum number of data points to be held within the Queue
      (default 1)
    """

    self.queueMax = queueMax
    self.queue = Queue(queueMax)

    self.error = False
    threading.Thread.__init__(self)

  def add(self, data):
    """
    Add a point of data to the Queue.

    Make sure the Queue is not full before adding the new data.

    Keyword Arguments:
    data -- The data to be added to the Queue
    """

    if(not(self.queue.full())): self.queue.put(data)

  def get(self):
    """
    Get a point of data from the Queue.

    Make sure the Queue is not empty before removing a data point from the
    Queue.

    Returns:
      The least recent data-point in the Queue. (first in, first out)
    """

    if(not(self.queue.empty())): return self.queue.get()
Code example #39
File: ydict.py Project: iblis17/zdict
def browse():
    wordlist = list(db.items())
    size = len(wordlist)
    totalcount = 0.0
    right = 0.0
    lookup = Queue(maxsize=int(prefetch))
    answer = Queue(maxsize=int(prefetch))
    lookuper = Process(target=answers, args=(lookup, answer))
    lookuper.daemon = True
    lookuper.start()

    if size <= 1:
        print("There must be at least two words needed in the list.")
        exit()
    i = 0
    while(1):
        while(not lookup.full()):
            k = wordlist[i][0]
            i = i + 1
            if i >= size:
                i = 0
            k = k.lower()
            lookup.put(k)
        result = answer.get()
        k = result.key.text
        if k not in db:
            continue
        print(result.show())
        speak(result)

        try:
            word = input("(d) Delete, (enter) Continue: ")
            if word == "d":
                del db[k]
                wordlist = list(db.items())
                size = len(wordlist)
                if size <= 1:
                    print("There must be at least two words "
                          "needed in the list.")
                    exit()
        except KeyboardInterrupt:
            result(right, totalcount)
Code example #40
File: queue.py Project: antiface/spectral
class SafeQueue:
    """ Safe Queue implementation is a wrapper around standard multiprocessing
        queue. Implements safe queuing and dequeueing. """

    def __init__(self, size=10):
        self._queue = Queue(size)
        self._lock = Lock()

    def queue(self, inp):
        self._lock.acquire()
        if self._queue.full():
            self._queue.get()
        self._queue.put_nowait(inp)
        self._lock.release()

    def dequeue(self):
        self._lock.acquire()
        item = None
        if not self._queue.empty():
            item = self._queue.get_nowait()
        self._lock.release()
        return item
Code example #41
File: wiki_bot.py Project: I0T/EPGT
def main():
    f = file('Forenotice.CSV', 'r')
    run_queue = Queue(50)
   
    for i in range(20):
        p = Process(target=run_task, args=(run_queue,))
        p.daemon = True
        p.start()
   
    i = 104
    while 1:
        while run_queue.full():
            time.sleep(3)
        
        line = f.readline()
        if not line:
            break
        id, name = line.split(',', 1)
        run_queue.put({'id': id, 'name': name}) 
        print "Queue: %s / %s" % (id, '73380')

    while 1:
        time.sleep(3)
Code example #42
class WorkerProcess(object):
    def __init__(self, idnum, topic, collname, in_counter_value, out_counter_value,
                 drop_counter_value, queue_maxsize,
                 mongodb_host, mongodb_port, mongodb_name, nodename_prefix):
        self.name = "WorkerProcess-%4d-%s" % (idnum, topic)
        self.id = idnum
        self.topic = topic
        self.collname = collname
        self.queue = Queue(queue_maxsize)
        self.out_counter = Counter(out_counter_value)
        self.in_counter  = Counter(in_counter_value)
        self.drop_counter = Counter(drop_counter_value)
        self.worker_out_counter = Counter()
        self.worker_in_counter  = Counter()
        self.worker_drop_counter = Counter()
        self.mongodb_host = mongodb_host
        self.mongodb_port = mongodb_port
        self.mongodb_name = mongodb_name
        self.nodename_prefix = nodename_prefix
        self.quit = Value('i', 0)

        self.process = Process(name=self.name, target=self.run)
        self.process.start()

    def init(self):
        global use_setproctitle
        if use_setproctitle:
            setproctitle("mongodb_log %s" % self.topic)

        self.mongoconn = Connection(self.mongodb_host, self.mongodb_port)
        self.mongodb = self.mongoconn[self.mongodb_name]
        self.mongodb.set_profiling_level(SLOW_ONLY)

        self.collection = self.mongodb[self.collname]
        self.collection.count()

        self.queue.cancel_join_thread()

        rospy.init_node(WORKER_NODE_NAME % (self.nodename_prefix, self.id, self.collname),
                        anonymous=False)

        self.subscriber = None
        while not self.subscriber:
            try:
                msg_class, real_topic, msg_eval = rostopic.get_topic_class(self.topic, blocking=True)
                self.subscriber = rospy.Subscriber(real_topic, msg_class, self.enqueue, self.topic)
            except rostopic.ROSTopicIOException:
                print("FAILED to subscribe, will keep trying %s" % self.name)
                time.sleep(randint(1,10))
            except rospy.ROSInitException:
                print("FAILED to initialize, will keep trying %s" % self.name)
                time.sleep(randint(1,10))
                self.subscriber = None

    def run(self):
        self.init()

        print("ACTIVE: %s" % self.name)

        # run the thread
        self.dequeue()

        # free connection
        # self.mongoconn.end_request()

    def is_quit(self):
        return self.quit.value == 1

    def shutdown(self):
        if not self.is_quit():
            #print("SHUTDOWN %s qsize %d" % (self.name, self.queue.qsize()))
            self.quit.value = 1
            self.queue.put("shutdown")
            while not self.queue.empty(): sleep(0.1)
        #print("JOIN %s qsize %d" % (self.name, self.queue.qsize()))
        self.process.join()
        self.process.terminate()
    def qsize(self):
        return self.queue.qsize()

    def enqueue(self, data, topic, current_time=None):
        if not self.is_quit():
            if self.queue.full():
                try:
                    self.queue.get_nowait()
                    self.drop_counter.increment()
                    self.worker_drop_counter.increment()
                except Empty:
                    pass
            #self.queue.put((topic, data, current_time or datetime.now()))
            self.queue.put((topic, data, rospy.get_time()))
            self.in_counter.increment()
            self.worker_in_counter.increment()

    def dequeue(self):
        while not self.is_quit():
            t = None
            try:
                t = self.queue.get(True)
            except IOError:
                # Anticipate Ctrl-C
                #print("Quit W1: %s" % self.name)
                self.quit.value = 1
                break
            if isinstance(t, tuple):
                self.out_counter.increment()
                self.worker_out_counter.increment()
                topic = t[0]
                msg   = t[1]
                ctime = t[2]

                if isinstance(msg, rospy.Message):
                    doc = ros_datacentre.util.msg_to_document(msg)
                    doc["__recorded"] = ctime or datetime.now()
                    doc["__topic"]    = topic
                    try:
                        #print(self.sep + threading.current_thread().getName() + "@" + topic+": ")
                        #pprint.pprint(doc)
                        self.collection.insert(doc)
                    except InvalidDocument, e:
                        print("InvalidDocument " + current_process().name + "@" + topic +": \n")
                        print e
                    except InvalidStringData, e:
                        print("InvalidStringData " + current_process().name + "@" + topic +": \n")
                        print e

            else:
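
The enqueue/dequeue pair above implements a drop-oldest policy: when the bounded queue is full, the oldest entry is discarded before the new message is put, so the logger keeps the most recent data instead of blocking the subscriber callback. A minimal, self-contained sketch of that pattern (the helper name is illustrative, not part of the logger above):

from multiprocessing import Queue
from queue import Empty, Full

def put_drop_oldest(q, item):
    """Put item on a bounded queue; if it is full, drop the oldest entry first."""
    while True:
        try:
            q.put_nowait(item)
            return
        except Full:
            try:
                q.get_nowait()   # make room by discarding the oldest item
            except Empty:
                pass             # raced with the feeder thread or a consumer; retry

if __name__ == '__main__':
    q = Queue(2)
    for i in range(5):
        put_drop_oldest(q, i)
    print(q.get(), q.get())      # 3 4 -- only the most recent items survive
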
コード例 #43
0
ファイル: converter.py プロジェクト: alexoneill/TerraMotus
class Converter(multiprocessing.Process):
  """
  A class to convert a stream of data to the required data structures for
  ODE and OpenGL.

  This takes a instance of a DataSource and converts the data it supplies
  in its Queue to a usable type for ODE and OpenGL. This also saves data
  from the Kinect DataSource as a CSV file after the transformations.


  This also applies some transformations on the data, given its type:
    Kinect Data: Removes errors, flips the surface and averages the data
    CSV Data: Pipes the data directly to the conversion methods without
    transformation.
  """

  # The value that the Kinect returns to represent errors
  errorVal = 2047.0

  def __init__(self, dataSource, mapDir = "", queueMax = 1):
    """
    Initialize the Converter class.

    Create two Queues for thread- and process-safe communication of the
    converted data, one for the physics thread and one for the display thread.

    Keyword Arguments:
    dataSource -- An instance of a DataSource to be used for conversion
    mapDir -- The directory that the maps are stored in (default "")
    queueMax -- The maximum number of stored versions of converted data
    """

    # Queue Items
    self.queueMax = queueMax
    self.physicsQueue = Queue(queueMax)
    self.graphicsQueue = Queue(queueMax)

    # Constants from the constructor
    self.dataThread = dataSource
    self.mapDir = mapDir
    
    # Variables to store various states of the thread
    self.error = False
    self.exit = False
    self.ready = False
    
    # Start the thread and the other process
    multiprocessing.Process.__init__(self)
    self.dataThread.start()

  def stripExtension(self, path):
    """
    Remove the file extension given its path, if it exists.

    Keyword Arguments:
    path -- path to the file
    """
    
    # Get the filename
    filename = path.split("/")[-1]

    # Remove the part after the last dot
    noExtension = filename.split(".")[:-1]

    # Restore any periods within the filename
    return ".".join(noExtension)
 
  def getOutPath(self, name):
    """
    Generate an overwrite-safe file path for a given name.

    Add numbers to the end of the filename to avoid over-writing
    an existing filename.

    Keyword Arguments:
    name -- The desired name of the file

    Returns:
      A safe path to an available file
    """

    # Add a number to the end of the file if one exists already
    # to avoid over-writing
    newName = name
    count = 1
    while(os.path.exists(newName)):
      newName = self.stripExtension(name) + str(count) + ".csv"
      count += 1

    # Return a path to the file, taking into account the mapDir
    return self.mapDir +"/"+ newName if(self.mapDir != "") else newName

  def run(self):
    self.error = self.dataThread.error
    
    if(self.dataThread.error):
      self.stop()

    while(not(self.exit)):
      dataPlane = self.dataThread.get()

      if(dataPlane is None):
        continue

      if(isinstance(self.dataThread, sources.KinectSource)):
        dataPlane = errors.averageErrors(dataPlane, Converter.errorVal)
        dataPlane = filters.flipSurface(dataPlane)
        self.export(dataPlane)
      dataPlane = filters.averagePass(dataPlane, 5)

      if(not(self.physicsQueue.full())): self.physicsQueue.put(dataPlane)
      if(not(self.graphicsQueue.full())): self.graphicsQueue.put(dataPlane)
      self.stop()

  def getReady(self):
    return self.ready

  def terminate(self):
    self.stop()
    multiprocessing.Process.terminate(self)

  def stop(self):
    self.dataThread.stop()
    self.exit = True

  def export(self, dataPlane):
    path = self.getOutPath(dataPlane.name)
    writer.writePlaneToFile(dataPlane, path)
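
For reference, a standalone sketch of the fan-out used in run() above: each converted plane is offered to both bounded queues, and the put is simply skipped when a queue is still full, so a slow consumer sees a recent plane rather than a growing backlog (the names below are illustrative, not part of the Converter):

from multiprocessing import Queue

def publish_latest(queues, item):
    """Offer item to every consumer queue, skipping queues that are currently full."""
    for q in queues:
        if not q.full():
            q.put(item)

if __name__ == '__main__':
    physics_q, graphics_q = Queue(1), Queue(1)
    for plane in ['plane-0', 'plane-1', 'plane-2']:   # stand-ins for converted data
        publish_latest([physics_q, graphics_q], plane)
    print(physics_q.get())   # plane-0: later planes were skipped while the queue was full
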
コード例 #44
0
class HEZDetector(Process):
    def __init__(self):
        super(HEZDetector, self).__init__()
        self.mark_positions = list()
        self.queue = Queue(1)

    def run(self):
        self.process()

    def process(self):
        prev_time = time.time()

        # camera
        cap = cv2.VideoCapture(0)

        # check camera
        check, test_frame = cap.read()

        # read the configuration
        lrc = LandmarkRecognitionConfiguration()
        lf = LandmarkFrame(lrc)

        if not check:
            print('Camera not found')
            exit(-1)

        frame_height, frame_width = test_frame.shape[:2]
        print('FRAME SIZE: ' + str(frame_width) + ' ' + str(frame_height))

        frame_center = (frame_width // 2, frame_height // 2)

        hmark = h_mark_processor.HMarkProcessor()
        emark = e_mark_processor.EMarkProcessor()
        zmark = z_mark_processor.ZMarkProcessor()

        SEARCH_ALL_LANDMARKS = lrc.get_find_mode()

        kernel = np.ones((3, 3), np.uint8)

        cv2.namedWindow("frame1", cv2.WINDOW_AUTOSIZE)
        cv2.namedWindow("frame2", cv2.WINDOW_AUTOSIZE)
        # cv2.namedWindow("frame2_bin0", cv2.WINDOW_AUTOSIZE)
        # cv2.namedWindow("frame2_bin1", cv2.WINDOW_AUTOSIZE)
        # cv2.namedWindow("frame2_bin2", cv2.WINDOW_AUTOSIZE)
        cv2.namedWindow("frame3", cv2.WINDOW_AUTOSIZE)
        cv2.namedWindow("frame4", cv2.WINDOW_AUTOSIZE)

        display_frame_size = lrc.get_frame_size()

        test_int = 0
        while (True):
            test_int += 1
            try:
                # SIMULATE LOW PERFORMANCE, FOR TESTING ONLY
                cur_time = time.time()
                if cur_time - prev_time < 0.5:
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                    time.sleep(0.5)
                    continue

                prev_time = cur_time

                # Capture frame-by-frame
                ret, frame = cap.read()

                if not ret:
                    continue

                try:
                    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                except Exception:
                    print('Get frame ERROR')
                    continue

                # simple
                gray_tr = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                # binary_images = lf.get_frames(gray_tr)

                # BEGIN PROCESSING CONTOURS FOR CURRENT FRAME
                # for binary_source_image in binary_images:
                _, binary_source_image = cv2.threshold(gray_tr, 155, 255, cv2.THRESH_BINARY)

                # flag indicating whether a mark was found in this frame
                mark_found = False

                # morphological opening and dilation to reduce noise in the binary image
                # (removes small specks so that fewer contours have to be processed)
                opening = cv2.morphologyEx(binary_source_image, cv2.MORPH_OPEN, kernel, iterations=1)
                bin_res = cv2.dilate(opening, kernel, iterations=1)
                binary_result = cv2.medianBlur(bin_res, 3)

                # WE FIND ALL CONTOURS IN IMAGE TO PROCESS

                # 3.0.0. im2, contours, hierarchy = cv2.findContours(binary_result, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
                contours, _ = cv2.findContours(binary_result, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
                # PROCESSING ALL CONTOURS FOR CURRENT FRAME
                for contour in contours:
                    # mark_found = False
                    contour = cv2.approxPolyDP(contour, 2, False)

                    # finding circle by geometrically criteria
                    area = cv2.contourArea(contour, oriented=False)
                    perim = cv2.arcLength(contour, closed=True)
                    ratio = None
                    if perim > 0:
                        ratio = area / (perim * perim)

                    # A CONTOUR THAT LOOKS LIKE A CIRCLE WAS FOUND
                    if perim > 0 and 0.07 < ratio < 0.087:
                        x, y, width, height = cv2.boundingRect(contour)
                        roi_frame = np.copy(frame[y:y + height, x:x + width])
                        roi_circle = np.copy(binary_source_image[y:y + height, x:x + width])

                        tt = None
                        circle_inner_contours = None
                        circle_inner_contours_hierarchy = None
                        # wrapped in try/except because cv2.findContours
                        # occasionally raises an error here
                        try:
                            # 3.0.0. tt, circle_inner_contours, circle_inner_contours_hierarchy = cv2.findContours(
                            circle_inner_contours, circle_inner_contours_hierarchy = cv2.findContours(
                                    np.copy(roi_circle),
                                    cv2.RETR_LIST,
                                    cv2.CHAIN_APPROX_SIMPLE)
                        except Exception as ex:
                            print(ex)
                            continue

                        # draw contour for visual debugging
                        center, radius = cv2.minEnclosingCircle(contour)
                        cv2.circle(frame, (int(center[0]), int(center[1])), int(radius), (255, 0, 255), 3)

                        # display distance
                        # distance_to_mark = hmark.calculateDistance(radius)
                        # distance_to_mark = int(distance_to_mark)

                        # cv2.putText(frame, 'DISTANCE: ' + str(distance_to_mark) + ' cm',
                        #             (int(center[0]), int(center[1])), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 50, 255))

                        # PROCESS ALL CONTOURS IN CIRCLE
                        for contour_in_circle in circle_inner_contours:
                            contour_in_circle = cv2.approxPolyDP(contour_in_circle, 2, True)

                            # TODO: only process contours whose points lie inside the circle
                            shifted_center = (center[0] - x, center[1] - y)
                            if len(contour_in_circle) < 5 or fabs(cv2.arcLength(contour_in_circle, True)) < 20:
                                continue

                            cv2.circle(binary_source_image, (int(center[0]), int(center[1])), int(radius), (0, 0, 255),
                                       3)

                            # get the minimum-area rectangle covering the contour
                            rect = cv2.minAreaRect(contour_in_circle)
                            # OpenCV 3.x: box = cv2.boxPoints(rect)
                            box = cv2.cv.BoxPoints(rect)
                            box = np.int0(box)

                            # check to H mark
                            h_mark_points = hmark.getBoxROI(box)
                            result_h_mark = hmark.checkBoxROIToHMark(roi_circle, h_mark_points, True, 95)

                            if result_h_mark:
                                cv2.drawContours(roi_frame, [box], 0, (0, 255, 0), 2)
                                hmark.drawMarkType(frame)
                                hmark.drawMark(roi_frame, h_mark_points, (x, y))

                            # check to E mark
                            e_mark_points = emark.getBoxROI(box)
                            result_e_mark = emark.checkBoxROIToHMark(roi_circle, e_mark_points, True)

                            if result_e_mark:
                                cv2.drawContours(roi_frame, [box], 0, (0, 255, 0), 2)
                                emark.drawMarkType(frame)
                                emark.drawMark(roi_frame, e_mark_points, (x, y))

                            # Check to Z mark
                            z_mark_points = zmark.getBoxROI(box)
                            result_z_mark = zmark.checkBoxROIToHMark(roi_circle, z_mark_points, True)

                            if result_z_mark:
                                zmark.drawMarkType(frame)
                                cv2.drawContours(roi_frame, [box], 0, (0, 255, 0), 2)
                                zmark.drawMark(roi_frame, z_mark_points, (x, y))

                            # cv2.imshow('frame3', cv2.resize(roi_frame, display_frame_size))

                            if result_z_mark or result_e_mark or result_h_mark:
                                mark_found = True  # set that mark found in current frame, skip all other contours

                                # A MARK WAS FOUND: INFORM THE CONTROLLING PROCESS ABOUT ITS POSITION
                                # BEGIN SENDING DATA TO THE OTHER PROCESS
                                # ONLY IF THE CONTROLLER HAS ALREADY CONSUMED THE PREVIOUS RESULT
                                if not self.queue.full():
                                    center01 = hmark.middlePoint(box[0], box[1])
                                    center23 = hmark.middlePoint(box[2], box[3])
                                    mark_center = hmark.middlePoint(center01, center23)
                                    deviation = mark_center[0] - frame_center[0], mark_center[1] - frame_center[1]
                                    mark_type = None
                                    if result_h_mark:
                                        mark_type = 'H'
                                    elif result_e_mark:
                                        mark_type = 'E'
                                    elif result_z_mark:
                                        mark_type = 'Z'

                                    self.queue.put((mark_type, deviation))
                                # END SEND DATA TO ANOTHER PROCESS

                                break

                        # draw a circle if a mark was found
                        if mark_found:
                            cv2.circle(frame, (int(center[0]), int(center[1])), int(radius), (0, 0, 255), 3)
                            cv2.circle(frame, (int(center[0]), int(center[1])), 1, (0, 0, 255), 3)

                        # once all inner contours have been processed, check the result;
                        # depending on the configured find mode, stop after the first detected mark
                        if mark_found and SEARCH_ALL_LANDMARKS:
                            break  # leave the main contour-processing loop; a mark was found for the current frame

                            # cv2.imshow('frame4', cv2.resize(roi_circle, display_frame_size))
                # cv2.imshow('frame2', cv2.resize(binary_source_image, display_frame_size))

                # cv2.imshow('frame2_bin0', cv2.resize(binary_images[0], display_frame_size))
                # cv2.imshow('frame2_bin1', cv2.resize(binary_images[1], display_frame_size))
                # cv2.imshow('frame2_bin2', cv2.resize(binary_images[2], display_frame_size))

                # END PROCESSING CONTOURS FOR CURRENT FRAME
                cv2.imshow('frame1', cv2.resize(frame, display_frame_size))

                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break

            except Exception as ex:
                print(ex)

        # When everything done, release the capture
        cap.release()
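
The detector above only puts a result when its single-slot queue has room, so the controlling process always sees at most the latest detection. A hedged sketch of such a consumer loop (the controller itself is not part of this example):

from queue import Empty

def poll_detections(detector, handle):
    """Consume (mark_type, deviation) tuples from the detector's single-slot queue."""
    while detector.is_alive():
        try:
            mark_type, deviation = detector.queue.get(timeout=0.5)
        except Empty:
            continue                      # no mark seen in this interval
        handle(mark_type, deviation)      # e.g. adjust heading towards the mark

# usage sketch:
#   detector = HEZDetector()
#   detector.start()
#   poll_detections(detector, lambda mark, dev: print(mark, dev))
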
コード例 #45
0
class OpenQuoteContext:
    def __init__(self, host="127.0.0.1", sync_port=11111, async_port=11111):
        """
        create a context to established a network connection
        :param host:the address of the network connection
        :param sync_port:network connection port for synchronous communication
        :param async_port:network connection port for asynchronous communication,receiving client data push
        """
        self.__host = host
        self.__sync_port = sync_port
        self.__async_port = async_port

        self._req_queue = Queue()
        self._handlers_ctx = HandlerContext()

        self._async_ctx = _AsyncNetworkManager(self.__host, self.__async_port, self._handlers_ctx)
        self._proc_run = False
        self._sync_net_ctx = _SyncNetworkQueryCtx(self.__host, self.__sync_port, long_conn=True)
        self._net_proc = Thread(target=_net_proc,
                                args=(self._async_ctx,
                                      self._req_queue,))

    def __del__(self):
        if self._proc_run:
            self._proc_run = False
            self._stop_net_proc()
            self._net_proc.join(timeout=5)

    def set_handler(self, handler):
        return self._handlers_ctx.set_handler(handler)

    def start(self):
        """
        start the receiving thread to asynchronously receive data pushed by the client
        """
        self._net_proc = Thread(target=_net_proc,
                                args=(self._async_ctx,
                                      self._req_queue,))
        self._net_proc.start()
        self._proc_run = True

    def stop(self):
        """
        stop the receiving thread and stop receiving data pushed by the client
        """
        if self._proc_run:
            self._stop_net_proc()
            self._net_proc.join(timeout=5)
            self._proc_run = False
        self._net_proc = Thread(target=_net_proc,
                                args=(self._async_ctx,
                                      self._req_queue,))

    def _send_sync_req(self, req_str):
        """
        send a synchronous request
        """
        ret, msg, content = self._sync_net_ctx.network_query(req_str)
        if ret != RET_OK:
            return RET_ERROR, msg, None
        return RET_OK, msg, content

    def _send_async_req(self, req_str):
        """
        send an asynchronous request
        """
        if self._req_queue.full() is False:
            try:
                self._req_queue.put((True, req_str), timeout=1)
                return RET_OK, ''
            except Exception as e:
                _ = e
                err = sys.exc_info()[1]
                error_str = ERROR_STR_PREFIX + str(err)
                return RET_ERROR, error_str
        else:
            error_str = ERROR_STR_PREFIX + "Request queue is full. The size: %s" % self._req_queue.qsize()
            return RET_ERROR, error_str

    def _get_sync_query_processor(self, pack_func, unpack_func):
        """
        build a synchronous query processor
        :param pack_func: function that packs the request into a request string
        :param unpack_func: function that unpacks the response string
        :return: sync_query_processor
        """
        send_req = self._send_sync_req

        def sync_query_processor(**kargs):
            ret_code, msg, req_str = pack_func(**kargs)
            if ret_code == RET_ERROR:
                return ret_code, msg, None

            ret_code, msg, rsp_str = send_req(req_str)

            if ret_code == RET_ERROR:
                return ret_code, msg, None

            ret_code, msg, content = unpack_func(rsp_str)
            if ret_code == RET_ERROR:
                return ret_code, msg, None
            return RET_OK, msg, content

        return sync_query_processor

    def _stop_net_proc(self):
        """
        stop the request of network
        :return: (ret_error,error_str)
        """
        if self._req_queue.full() is False:
            try:
                self._req_queue.put((False, None), timeout=1)
                return RET_OK, ''
            except Exception as e:
                _ = e
                err = sys.exc_info()[1]
                error_str = ERROR_STR_PREFIX + str(err)
                return RET_ERROR, error_str
        else:
            error_str = ERROR_STR_PREFIX + "Cannot send stop request. queue is full. The size: %s" \
                                           % self._req_queue.qsize()
            return RET_ERROR, error_str

    def get_trading_days(self, market, start_date=None, end_date=None):

        if market is None or isinstance(market, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of market param is wrong"
            return RET_ERROR, error_str

        if start_date is not None and isinstance(start_date, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of start_date param is wrong"
            return RET_ERROR, error_str

        if end_date is not None and isinstance(end_date, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of end_date param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(TradeDayQuery.pack_req,
                                                         TradeDayQuery.unpack_rsp)

        # the keys of kargs should be corresponding to the actual function arguments
        kargs = {'market': market, 'start_date': start_date, "end_date": end_date}
        ret_code, msg, trade_day_list = query_processor(**kargs)

        if ret_code != RET_OK:
            return RET_ERROR, msg

        return RET_OK, trade_day_list

    def get_stock_basicinfo(self, market, stock_type='STOCK'):
        param_table = {'market': market, 'stock_type': stock_type}
        for x in param_table:
            param = param_table[x]
            if param is None or isinstance(param, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(StockBasicInfoQuery.pack_req,
                                                         StockBasicInfoQuery.unpack_rsp)
        kargs = {"market": market, 'stock_type': stock_type}

        ret_code, msg, basic_info_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'name', 'lot_size', 'stock_type']

        basic_info_table = pd.DataFrame(basic_info_list, columns=col_list)

        return RET_OK, basic_info_table

    def get_history_kline(self, code, start=None, end=None, ktype='K_DAY', autype='qfq'):

        if start is not None and isinstance(start, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of start param is wrong"
            return RET_ERROR, error_str

        if end is not None and isinstance(end, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of end param is wrong"
            return RET_ERROR, error_str

        param_table = {'code': code, 'ktype': ktype, 'autype': autype}
        for x in param_table:
            param = param_table[x]
            if param is None or isinstance(param, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(HistoryKlineQuery.pack_req,
                                                         HistoryKlineQuery.unpack_rsp)
        kargs = {"stock_str": code, "start_date": start, "end_date": end, "ktype": ktype, "autype": autype}

        ret_code, msg, kline_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'time_key', 'open', 'close', 'high', 'low', 'volume', 'turnover']
        kline_frame_table = pd.DataFrame(kline_list, columns=col_list)

        return RET_OK, kline_frame_table

    def get_autype_list(self, code_list):

        if code_list is None or isinstance(code_list, list) is False:
            error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong"
            return RET_ERROR, error_str

        for code in code_list:
            if code is None or isinstance(code, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(ExrightQuery.pack_req,
                                                         ExrightQuery.unpack_rsp)
        kargs = {"stock_list": code_list}
        ret_code, msg, exr_record = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code',
                    'ex_div_date',
                    'split_ratio',
                    'per_cash_div',
                    'per_share_div_ratio',
                    'per_share_trans_ratio',
                    'allotment_ratio',
                    'allotment_price',
                    'stk_spo_ratio',
                    'stk_spo_price',
                    'forward_adj_factorA',
                    'forward_adj_factorB',
                    'backward_adj_factorA',
                    'backward_adj_factorB']

        exr_frame_table = pd.DataFrame(exr_record, columns=col_list)

        return RET_OK, exr_frame_table

    def get_market_snapshot(self, code_list):
        if code_list is None or isinstance(code_list, list) is False:
            error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong"
            return RET_ERROR, error_str

        for code in code_list:
            if code is None or isinstance(code, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(MarketSnapshotQuery.pack_req,
                                                         MarketSnapshotQuery.unpack_rsp)
        kargs = {"stock_list": code_list}

        ret_code, msg, snapshot_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'data_date', 'data_time', 'last_price', 'open_price',
                    'high_price', 'low_price', 'prev_close_price',
                    'volume', 'turnover', 'turnover_rate', 'suspension', 'listing_date'
                    ]

        snapshot_frame_table = pd.DataFrame(snapshot_list, columns=col_list)

        return RET_OK, snapshot_frame_table

    def subscribe(self, stock_code, data_type, push=False):
        """
        subscribe to a type of data for a stock
        :param stock_code: string stock code. For instance, "HK.00700", "US.AAPL"
        :param data_type: string data type. For instance, "K_1M", "K_MON"
        :param push: push option
        :return: (ret_code, ret_data). ret_code: RET_OK or RET_ERROR.
        """
        param_table = {'stock_code': stock_code, 'data_type': data_type}
        for x in param_table:
            param = param_table[x]
            if param is None or isinstance(param, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_subscribe_req,
                                                         SubscriptionQuery.unpack_subscribe_rsp)

        # the keys of kargs should be corresponding to the actual function arguments
        kargs = {'stock_str': stock_code, 'data_type': data_type}
        ret_code, msg, _ = query_processor(**kargs)

        if ret_code != RET_OK:
            return RET_ERROR, msg

        if push:
            ret_code, msg, push_req_str = SubscriptionQuery.pack_push_req(stock_code, data_type)

            if ret_code != RET_OK:
                return RET_ERROR, msg

            ret_code, msg = self._send_async_req(push_req_str)
            if ret_code != RET_OK:
                return RET_ERROR, msg

        return RET_OK, None

    def unsubscribe(self, stock_code, data_type):
        """
        unsubscribe from a type of data for a stock
        :param stock_code: string stock code. For instance, "HK.00700", "US.AAPL"
        :param data_type: string data type. For instance, "K_1M", "K_MON"
        :return: (ret_code, ret_data). ret_code: RET_OK or RET_ERROR.
        """

        param_table = {'stock_code': stock_code, 'data_type': data_type}
        for x in param_table:
            param = param_table[x]
            if param is None or isinstance(param, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_unsubscribe_req,
                                                         SubscriptionQuery.unpack_unsubscribe_rsp)
        # the keys of kargs should be corresponding to the actual function arguments
        kargs = {'stock_str': stock_code, 'data_type': data_type}

        ret_code, msg, _ = query_processor(**kargs)

        if ret_code != RET_OK:
            return RET_ERROR, msg

        return RET_OK, None

    def query_subscription(self):
        """
        get the current subscription table
        :return:
        """
        query_processor = self._get_sync_query_processor(SubscriptionQuery.pack_subscription_query_req,
                                                         SubscriptionQuery.unpack_subscription_query_rsp)

        ret_code, msg, subscription_table = query_processor()
        if ret_code == RET_ERROR:
            return ret_code, msg

        return RET_OK, subscription_table

    def get_stock_quote(self, code_list):
        """
        :param code_list:
        :return: DataFrame of quote data

        Usage:

        After subscribing to the "QUOTE" data type for the given stock codes,
        invoke get_stock_quote to obtain the data.

        """
        if code_list is None or isinstance(code_list, list) is False:
            error_str = ERROR_STR_PREFIX + "the type of code_list param is wrong"
            return RET_ERROR, error_str

        for code in code_list:
            if code is None or isinstance(code, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of param in code_list is wrong"
                return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(StockQuoteQuery.pack_req,
                                                         StockQuoteQuery.unpack_rsp,
                                                         )
        kargs = {"stock_list": code_list}

        ret_code, msg, quote_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'data_date', 'data_time', 'last_price', 'open_price',
                    'high_price', 'low_price', 'prev_close_price',
                    'volume', 'turnover', 'turnover_rate', 'amplitude', 'suspension', 'listing_date'
                    ]

        quote_frame_table = pd.DataFrame(quote_list, columns=col_list)

        return RET_OK, quote_frame_table

    def get_rt_ticker(self, code, num=500):
        """
        get transaction information
        :param code: stock code
        :param num: number of ticker records to return (default 500)
        :return: (ret_ok, ticker_frame_table)
        """

        if code is None or isinstance(code, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
            return RET_ERROR, error_str

        if num is None or isinstance(num, int) is False:
            error_str = ERROR_STR_PREFIX + "the type of num param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(TickerQuery.pack_req,
                                                         TickerQuery.unpack_rsp,
                                                         )
        kargs = {"stock_str": code, "num": num}
        ret_code, msg, ticker_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['stock_code', 'time', 'price', 'volume', 'turnover', "ticker_direction", 'sequence']
        ticker_frame_table = pd.DataFrame(ticker_list, columns=col_list)

        return RET_OK, ticker_frame_table

    def get_cur_kline(self, code, num, ktype='K_DAY', autype='qfq'):
        """
        get current kline
        :param code: stock code
        :param num: the number of K-lines to request
        :param ktype: the type of kline
        :param autype: price adjustment type, e.g. 'qfq'
        :return:
        """
        param_table = {'code': code, 'ktype': ktype}
        for x in param_table:
            param = param_table[x]
            if param is None or isinstance(param, str) is False:
                error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % x
                return RET_ERROR, error_str

        if num is None or isinstance(num, int) is False:
            error_str = ERROR_STR_PREFIX + "the type of num param is wrong"
            return RET_ERROR, error_str

        if autype is not None and isinstance(autype, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of autype param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(CurKlineQuery.pack_req,
                                                         CurKlineQuery.unpack_rsp,
                                                         )

        kargs = {"stock_str": code, "num": num, "ktype": ktype, "autype": autype}
        ret_code, msg, kline_list = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        col_list = ['code', 'time_key', 'open', 'close', 'high', 'low', 'volume', 'turnover']
        kline_frame_table = pd.DataFrame(kline_list, columns=col_list)

        return RET_OK, kline_frame_table

    def get_order_book(self, code):
        if code is None or isinstance(code, str) is False:
            error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
            return RET_ERROR, error_str

        query_processor = self._get_sync_query_processor(OrderBookQuery.pack_req,
                                                         OrderBookQuery.unpack_rsp,
                                                         )

        kargs = {"stock_str": code}
        ret_code, msg, orderbook = query_processor(**kargs)
        if ret_code == RET_ERROR:
            return ret_code, msg

        return RET_OK, orderbook
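
A short usage sketch based only on the methods defined in this example (host, ports and stock codes are illustrative):

ctx = OpenQuoteContext(host='127.0.0.1', sync_port=11111, async_port=11111)
ctx.start()                                        # start the asynchronous receive thread

ret, trading_days = ctx.get_trading_days('HK', start_date='2017-01-01', end_date='2017-01-31')
if ret == RET_OK:
    print(trading_days)

ret, snapshot = ctx.get_market_snapshot(['HK.00700'])
if ret == RET_OK:
    print(snapshot)                                # pandas DataFrame

ret, _ = ctx.subscribe('HK.00700', 'QUOTE')        # subscribe before querying quotes
ret, quotes = ctx.get_stock_quote(['HK.00700'])

ctx.stop()
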
コード例 #46
0
ファイル: QueueTest.py プロジェクト: rflrob/YildizLabCode
from multiprocessing import Queue, Pool, Manager
from sys import getsizeof


def addtoqueue(i, queue):
	"""docstring for addtoqueue"""
	pass


if __name__ == '__main__':
	q = Queue()
	m = Manager()
	
	l = m.list()

	print 'Queue Starting Size:', getsizeof(q)
	print 'List Starting Size:', getsizeof(l)

	for i in range(10000000):
		if q.full(): 
			print "Oh no! Queue is full after only %d iterations" % i
		l.append((i, 2.1, 2.2, 2.3, 2.4))
		if i % 10000 == 0: print i
	
	print 'Queue Full Size: ', getsizeof(q), ' (', getsizeof(q)/1024**2, ' MB)'
	print 'List Full Size: ', getsizeof(l), ' (', getsizeof(l)/1024**2, ' MB)'

	while not q.empty():
		q.get()
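
Note that a Queue() created without a maxsize, as above, never reports full(); only a bounded Queue(n) does, once n items have been put. A two-line sketch of the difference:

from multiprocessing import Queue

unbounded, bounded = Queue(), Queue(2)
bounded.put(1); bounded.put(2)
print(unbounded.full(), bounded.full())   # expected output: False True
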
	
コード例 #47
0
class TopicLogger(MongoDBLogger):
    """
    This class implements a generic topic logger.
    It simply dumps all messages received from the topic into the MongoDB.
    """

    def __init__(self, name, topic, collname, mongodb_host, mongodb_port, mongodb_name, max_queuesize=QUEUE_MAXSIZE):
        MongoDBLogger.__init__(self, name, topic, collname, mongodb_host, mongodb_port, mongodb_name)
        self.worker_out_counter = Counter()
        self.worker_in_counter = Counter()
        self.worker_drop_counter = Counter()
        self.queue = Queue(max_queuesize)

    def _init(self):
        """
        This method initializes this process.
        It initializes the connection to the MongoDB and subscribes to the topic.
        """
        self.mongoconn = Connection(self.mongodb_host, self.mongodb_port)
        self.mongodb = self.mongoconn[self.mongodb_name]
        self.mongodb.set_profiling_level = SLOW_ONLY

        self.collection = self.mongodb[self.collname]
        self.collection.count()

        self.queue.cancel_join_thread()
        self.subscriber = None
        while not self.subscriber:
            try:
                msg_class, real_topic, msg_eval = rostopic.get_topic_class(self.topic, blocking=True)
                self.subscriber = rospy.Subscriber(real_topic, msg_class, self._enqueue, self.topic)
            except rostopic.ROSTopicIOException:
                rospy.logwarn("FAILED to subscribe, will keep trying %s" % self.name)
                time.sleep(randint(1, 10))
            except rospy.ROSInitException:
                rospy.logwarn("FAILED to initialize, will keep trying %s" % self.name)
                time.sleep(randint(1, 10))
                self.subscriber = None

    def run(self):
        """
        This method does the actual logging.
        """
        self._init()
        rospy.logdebug("ACTIVE: %s" % self.name)
        # Process the messages
        while not self.is_quit():
            self._dequeue()

        # we must make sure to clear the queue before exiting,
        # or the parent thread might deadlock otherwise
        self.subscriber.unregister()
        self.subscriber = None
        while not self.queue.empty():
            self.queue.get_nowait()
        rospy.logdebug("STOPPED: %s" % self.name)

    def shutdown(self):
        self.queue.put("shutdown")
        super(TopicLogger, self).shutdown()

    def _sanitize_value(self, v):
        if isinstance(v, rospy.Message):
            return self._message_to_dict(v)
        elif isinstance(v, genpy.rostime.Time):
            t = datetime.utcfromtimestamp(v.secs)
            return t + timedelta(microseconds=v.nsecs / 1000.)
        elif isinstance(v, genpy.rostime.Duration):
            return v.secs + v.nsecs / 1000000000.
        elif isinstance(v, list):
            return [self._sanitize_value(t) for t in v]
        else:
            return v

    def _message_to_dict(self, val):
        d = {}
        for f in val.__slots__:
            d[f] = self._sanitize_value(getattr(val, f))
        return d

    def qsize(self):
        return self.queue.qsize()

    def _enqueue(self, data, topic, current_time=None):
        if not self.is_quit():
            if self.queue.full():
                try:
                    self.queue.get_nowait()
                    self.worker_drop_counter.increment()
                except Empty:
                    pass
            self.queue.put((topic, data, rospy.get_time()))
            self.worker_in_counter.increment()

    def _dequeue(self):
        try:
            t = self.queue.get(True)
        except IOError:
            self.quit = True
            return
        if isinstance(t, tuple):
            self.worker_out_counter.increment()
            topic = t[0]
            msg = t[1]
            ctime = t[2]

            if isinstance(msg, rospy.Message):
                doc = self._message_to_dict(msg)
                doc["__recorded"] = ctime or datetime.now()
                doc["__topic"] = topic
                try:
                    self.collection.insert(doc)
                except (InvalidStringData, InvalidDocument) as e:
                    rospy.logerr("%s %s@%s:\n%s" % (e.__class__.__name__, current_process().name, topic, e))
        else:
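
Both loggers in this document stop their consumer by pushing a sentinel that is not a tuple (the string "shutdown"): anything that fails the isinstance(t, tuple) check falls through to the else branch and ends the loop. A minimal sketch of that sentinel pattern outside ROS:

from multiprocessing import Process, Queue

def consumer(q):
    while True:
        item = q.get()
        if not isinstance(item, tuple):      # a sentinel such as 'shutdown' ends the loop
            break
        topic, payload, stamp = item
        print('store', topic, payload, stamp)

if __name__ == '__main__':
    q = Queue()
    p = Process(target=consumer, args=(q,))
    p.start()
    q.put(('/odom', {'x': 1.0}, 42.0))
    q.put('shutdown')
    p.join()
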
コード例 #48
0
ファイル: video.py プロジェクト: dacer250/Object-detection
def video(args):
    """
    Read and apply object detection to input video stream
    """

    # Set the multiprocessing logger to debug if required
    if args["logger_debug"]:
        logger = multiprocessing.log_to_stderr()
        logger.setLevel(multiprocessing.SUBDEBUG)

    # Multiprocessing: Init input and output Queue, output Priority Queue and pool of workers
    input_q = Queue(maxsize=args["queue_size"])
    output_q = Queue(maxsize=args["queue_size"])
    output_pq = PriorityQueue(maxsize=3*args["queue_size"])
    pool = Pool(args["num_workers"], worker, (input_q,output_q))
    
    # created a threaded video stream and start the FPS counter
    vs = cv2.VideoCapture("inputs/{}".format(args["input_videos"]))
    fps = FPS().start()

    # Define the codec and create VideoWriter object
    if args["output"]:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('outputs/{}.avi'.format(args["output_name"]),
                              fourcc, vs.get(cv2.CAP_PROP_FPS),
                              (int(vs.get(cv2.CAP_PROP_FRAME_WIDTH)),
                               int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT))))

    # Start reading and treating the video stream
    if args["display"] > 0:
        print()
        print("=====================================================================")
        print("Starting video acquisition. Press 'q' (on the video windows) to stop.")
        print("=====================================================================")
        print()

    countReadFrame = 0
    countWriteFrame = 1
    nFrame = int(vs.get(cv2.CAP_PROP_FRAME_COUNT))
    firstReadFrame = True
    firstTreatedFrame = True
    firstUsedFrame = True
    while True:
        # Check input queue is not full
        if not input_q.full():
            # Read frame and store in input queue
            ret, frame = vs.read()
            if ret:            
                input_q.put((int(vs.get(cv2.CAP_PROP_POS_FRAMES)),frame))
                countReadFrame = countReadFrame + 1
                if firstReadFrame:
                    print(" --> Reading first frames from input file. Feeding input queue.\n")
                    firstReadFrame = False

        # Check output queue is not empty
        if not output_q.empty():
            # Recover treated frame in output queue and feed priority queue
            output_pq.put(output_q.get())
            if firstTreatedFrame:
                print(" --> Recovering the first treated frame.\n")
                firstTreatedFrame = False
                
        # Check output priority queue is not empty
        if not output_pq.empty():
            prior, output_frame = output_pq.get()
            if prior > countWriteFrame:
                output_pq.put((prior, output_frame))
            else:
                countWriteFrame = countWriteFrame + 1
                output_rgb = cv2.cvtColor(output_frame, cv2.COLOR_RGB2BGR)

                # Write the frame in file
                if args["output"]:
                    out.write(output_rgb)

                # Display the resulting frame
                if args["display"]:
                    cv2.imshow('frame', output_rgb)
                    fps.update()

                if firstUsedFrame:
                    print(" --> Start using recovered frame (displaying and/or writing).\n")
                    firstUsedFrame = False

                
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        print("Read frames: %-3i %% -- Write frame: %-3i %%" % (int(countReadFrame/nFrame * 100), int(countWriteFrame/nFrame * 100)), end='\r')
        if (not ret) and input_q.empty() and output_q.empty() and output_pq.empty():
            break


    print("\nFile has been successfully read and processed:\n  --> {}/{} read frames \n  --> {}/{} write frames \n".format(countReadFrame,nFrame,countWriteFrame-1,nFrame))
    
    # When everything done, release the capture
    fps.stop()
    pool.terminate()
    vs.release()
    if args["output"]:
        out.release()
    cv2.destroyAllWindows()
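
The priority queue above restores frame order: workers finish frames out of order, so each result is tagged with its frame number and is only used once that number matches the next expected index; otherwise it is pushed back. A standalone sketch of the reordering trick:

from queue import PriorityQueue

def reorder(results):
    """Yield (index, item) pairs in index order, assuming indices start at 1
    and every index eventually arrives (the same trick as output_pq above)."""
    pq = PriorityQueue()
    next_index = 1
    for idx, item in results:
        pq.put((idx, item))
        while not pq.empty():
            head = pq.get()
            if head[0] == next_index:
                yield head
                next_index += 1
            else:
                pq.put(head)          # not its turn yet; push it back
                break

print(list(reorder([(2, 'b'), (1, 'a'), (3, 'c')])))
# [(1, 'a'), (2, 'b'), (3, 'c')]
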
コード例 #49
0
def multiprocess_progress(data, functor, finished, data_size, early_clip=None):
    from multiprocessing import Process, current_process, Queue

    num_procs = os.cpu_count()-1

    def worker(wnum, input_queue, output_queue):
        os.sched_setaffinity(0, [wnum])
        while True:
            try:
                idx, value = input_queue.get(block=False)
                if value == 'STOP':
                    break
                output_queue.put((idx, functor(value)))
            except:
                pass
            os.sched_yield()

    task_queue = Queue(2*num_procs)
    done_queue = Queue(2*num_procs)

    # Launch workers.
    print('Running {} workers ...'.format(num_procs))
    processes = []
    for i in range(num_procs):
        processes.append(Process(target = worker,
            args = (i, task_queue, done_queue),
            name = 'worker {}'.format(i),
            daemon = True))
        processes[-1].start()

    # Push input data, and check for output data.
    num_sent = 0
    num_done = 0
    num_clipped = 0
    iterator = iter(data)
    perc = 0

    def print_progress(msg=None):
        msg_str = ''
        if msg is not None:
            msg_str = '['+msg+']'
        print('\033[2K\r{} sent, {} done, {} clipped, {} total ({} %) {}'.format(num_sent, 
            num_done, num_clipped, data_size, perc, msg_str), end='')

    while num_done < data_size:
        print_progress('sending work')

        while num_sent < data_size and not task_queue.full():
            nextval = next(iterator)
            clipped = False
            if early_clip is not None:
                clipped, clip_result = early_clip(num_sent, nextval)
                if clipped:
                    finished(num_sent, clip_result)
                    num_clipped += 1
                    num_done += 1

            if not clipped:
                task_queue.put((num_sent, nextval))

            num_sent += 1
            os.sched_yield()

        while True:
            try:
                i, result = done_queue.get(block=False)
                finished(i, result)
                num_done += 1
                perc = int(num_done / data_size * 100)
                print_progress('collecting results')
            except:
                break
            time.sleep(0)

        print_progress()
        time.sleep(0)

    # Terminate workers.
    for i in range(num_procs):
        task_queue.put((-1, 'STOP'))

    for p in processes:
        p.join()

    print('\n ... done')
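
A usage sketch for the helper above (fork-based platforms only, since the nested worker and the functor are captured by closure rather than pickled; the squaring functor and the odd-number clip are illustrative):

results = {}

def on_finished(index, value):
    results[index] = value

multiprocess_progress(
    data=range(100),
    functor=lambda x: x * x,                    # per-item work done in the worker processes
    finished=on_finished,                       # called in the parent for every result
    data_size=100,
    early_clip=lambda i, x: (x % 2 == 1, 0))    # odd inputs are clipped to 0 and never sent

# afterwards results[i] == i * i for even i, and 0 for odd i
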
コード例 #50
0
def run_parallel(reader, modifiers, filters, formatters, writers, threads=2, timeout=30,
                 preserve_order=False, input_queue_size=0, result_queue_size=0,
                 use_writer_process=True, compression=None):
    """
    Execute atropos in parallel mode.
    
    reader 				:: iterator over batches of reads (most likely a BatchIterator)
    modifiers 			::
    filters 			::
    formatters 			::
    writers				::
    threads				:: number of worker threads to use; additional threads are used
                        for the main process and the writer process (if requested).
    timeout				:: number of seconds after which waiting processes escalate their
                        messages from DEBUG to ERROR.
    preserve_order 		:: whether to preserve the input order of reads when writing
                        (only valid when `use_writer_process=True`)
    input_queue_size 	:: max number of items that can be in the input queue, or 0 for
                        no limit (be warned that this could explode memory usage)
    result_queue_size	:: max number of items that can be in the result queue, or 0 for
                        no limit (be warned that this could explode memory usage)
    use_writer_process	:: if True, a separate thread will be used to write results to
                        disk. Otherwise, each worker thread will write its results to
                        an output file with a '.N' extension, where N is the thread index.
                        This is useful in cases where the I/O is the main bottleneck.
    compression         :: if "writer", the writer process performs data compression; otherwise
                        the worker processes perform compression.
    """
    logging.getLogger().debug(
        "Starting atropos in parallel mode with threads={}, timeout={}".format(threads, timeout))
    
    assert threads >= 2
    
    # Reserve a thread for the writer process if it will be doing the compression and if one is available.
    if compression is None:
        compression = "writer" if use_writer_process and can_use_system_compression() else "worker"
    if compression == "writer" and threads > 2:
        threads -= 1
    
    timeout = max(timeout, RETRY_INTERVAL)
    
    # Queue by which batches of reads are sent to worker processes
    input_queue = Queue(input_queue_size)
    # Queue by which results are sent from the worker processes to the writer process
    result_queue = Queue(result_queue_size)
    # Queue for processes to send summary information back to main process
    summary_queue = Queue(threads)
    # Aggregate summary
    summary = Summary(trimmer_classes=modifiers.get_trimmer_classes())
    # Return code (0=normal, anything else is an error)
    rc = 0
    
    if use_writer_process:
        worker_result_handler = QueueResultHandler(result_queue)
        if compression == "writer":
            worker_result_handler = WorkerResultHandler(worker_result_handler)
        else:
            worker_result_handler = CompressingWorkerResultHandler(worker_result_handler)
        
        # Shared variable for communicating with writer thread
        writer_control = Control(WRITER_ACTIVE)
        # result handler
        if preserve_order:
            writer_result_handler = OrderPreservingWriterResultHandler(
                writers, compressed=compression == "worker")
        else:
            writer_result_handler = WriterResultHandler(
                writers, compressed=compression == "worker")
        # writer process
        writer_process = WriterProcess(writer_result_handler, result_queue, writer_control, timeout)
        writer_process.start()
    else:
        worker_result_handler = WorkerResultHandler(WriterResultHandler(writers, use_suffix=True))

    # worker processes
    def launch_workers(n, offset=0):
        logging.getLogger().info("Starting {} worker processes".format(n))
        workers = [
            WorkerProcess(
                i+offset, modifiers, filters, formatters, input_queue,
                worker_result_handler, summary_queue, timeout)
            for i in range(n)
        ]
        # start workers
        for worker in workers: worker.start()
        return workers
    
    def ensure_alive():
        alive = [worker.is_alive() for worker in worker_processes]
        if not all(alive):
            raise Exception("One or more worker process exited: {}".format(",".join(
                str(i) for i in range(len(alive)) if not alive[i])))
        if use_writer_process and not (writer_process.is_alive() and writer_control.check_value(WRITER_ACTIVE)):
            raise Exception("Writer process exited")

    def enqueue_all(iterable):
        num_items = 0
        for item in iterable:
            def condition():
                try:
                    input_queue.put(item, block=True, timeout=RETRY_INTERVAL)
                    return True
                except Full:
                    return False
            wait_on(
                condition,
                wait_message="Main process waiting to queue item {}",
                timeout=timeout,
                fail_callback=ensure_alive)
            num_items += 1
        return num_items

    def wait_on_process(process, terminate=False):
        timeout_callback = lambda: process.terminate() if terminate else None
        wait_on(
            lambda: not process.is_alive(),
            wait_message="Waiting on {} to terminate {{}}".format(process.name),
            timeout=timeout,
            wait=lambda: process.join(RETRY_INTERVAL),
            timeout_callback=timeout_callback)
    
    # Start worker processes, reserve a thread for the reader process,
    # which we will get back after it completes
    worker_processes = launch_workers(threads - 1)
    
    try:
        # Add batches of reads to the input queue. Provide a timeout callback
        # to check that subprocesses are alive.
        num_batches = enqueue_all(enumerate(reader, 1))
        logging.getLogger().debug(
            "Main loop complete; saw {} batches".format(num_batches))
        
        # Tell the worker processes no more input is coming
        enqueue_all((None,) * threads)
        
        # Tell the writer thread the max number of batches to expect
        if use_writer_process:
            writer_control.set_value(num_batches)
        
        # Now that the reader process is done, it essentially
        # frees up another thread to use for a worker
        worker_processes += launch_workers(1, threads-1)
        
        # Wait for all summaries to be available on queue
        def summary_timeout_callback():
            alive = [worker.is_alive() for worker in worker_processes]
            if any(alive):
                missing = ",".join(str(i) for i in range(len(alive)) if alive[i])
                logging.getLogger().error(
                    "Workers are still alive and haven't returned summaries: {}".format(missing))
        wait_on(
            lambda: summary_queue.full(),
            wait_message="Waiting on worker summaries {}",
            timeout=timeout,
            wait=True,
            timeout_callback=summary_timeout_callback)

        # Process summary information from worker processes
        logging.getLogger().debug("Processing summary information from worker processes")
        seen_summaries = set()
        seen_batches = set()
        def summary_fail_callback():
            missing_summaries = set(range(1, threads)) - seen_summaries
            raise Exception("Missing summaries from processes {}".format(
                ",".join(str(s) for s in missing_summaries)))
        for i in range(1, threads+1):
            batch = dequeue(
                summary_queue,
                fail_callback=summary_fail_callback)
            worker_index, worker_batches, process_stats, adapter_stats = batch
            if process_stats is None or adapter_stats is None:
                raise Exception("Worker process {} died unexpectedly".format(worker_index))
            else:
                logging.getLogger().debug("Processing summary for worker {}".format(worker_index))
            seen_summaries.add(worker_index)
            seen_batches |= worker_batches
            summary.add_process_stats(process_stats)
            summary.add_adapter_stats(adapter_stats)
        
        # Check if any batches were missed
        if num_batches > 0:
            missing_batches = set(range(1, num_batches+1)) - seen_batches
            if len(missing_batches) > 0:
                raise Exception("Workers did not process batches {}".format(
                    ",".join(str(b) for b in missing_batches)))
        
        if use_writer_process:
            # Wait for writer to complete
            wait_on_process(writer_process)
    
    except KeyboardInterrupt as e:
        logging.getLogger().error("Interrupted")
        rc = 130
    
    except IOError as e:
        if e.errno == errno.EPIPE:
            rc = 1
        else:
            raise
    
    except (FormatError, EOFError) as e:
        logging.getLogger().error("Atropos error", exc_info=True)
        rc = 1
    
    except Exception as e:
        logging.getLogger().error("Unknown error", exc_info=True)
        rc = 1
    
    finally:
        logging.getLogger().debug("Waiting for reader to close")
        reader.close()
        
        # notify all threads that they should stop
        logging.getLogger().debug("Exiting all processes")
        def kill(process):
            if rc <= 1:
                wait_on_process(process, terminate=True)
            elif process.is_alive():
                process.terminate()
        for process in worker_processes:
            kill(process)
        if use_writer_process:
            kill(writer_process)
    
    report = summary.finish() if rc == 0 else None
    
    details = dict(
        mode='parallel',
        threads=threads
    )
    
    return (rc, report, details)
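
One detail worth isolating: summary_queue is created with a capacity equal to the number of threads, so summary_queue.full() doubles as an "all workers have reported" barrier in the wait_on call above. A standalone sketch of that trick:

import time
from multiprocessing import Process, Queue

def report(index, summary_queue):
    time.sleep(0.1 * index)                # pretend to do some work
    summary_queue.put((index, 'stats for worker %d' % index))

if __name__ == '__main__':
    n = 4
    summary_queue = Queue(n)               # capacity == number of workers
    procs = [Process(target=report, args=(i, summary_queue)) for i in range(n)]
    for p in procs:
        p.start()

    while not summary_queue.full():        # True once every worker has put its summary
        time.sleep(0.05)

    for _ in range(n):
        print(summary_queue.get())
    for p in procs:
        p.join()
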
コード例 #51
0
ファイル: data.py プロジェクト: apache/incubator-singa
class ImageBatchIter(object):
    '''Utility for iterating over an image dataset to get mini-batches.

    Args:
        img_list_file(str): name of the file containing image meta data; each
                            line consists of image_path_suffix delimiter meta_info,
                            where meta info could be label index or label strings, etc.
                            meta_info should not contain the delimiter. If the meta_info
                            of each image is just the label index, then we will parse the
                            label index into a numpy array with length=batchsize
                            (for compatibility); otherwise, we return a list of meta_info;
                            if no meta info is available, we return a list of None.
        batch_size(int): num of samples in one mini-batch
        image_transform: a function for image augmentation; it accepts the full
                        image path and outputs a list of augmented images.
        shuffle(boolean): True for shuffling images in the list
        delimiter(char): delimiter between image_path_suffix and label, e.g.,
                         space or comma
        image_folder(str): prefix of the image path
        capacity(int): the max num of mini-batches in the internal queue.
    '''

    def __init__(self, img_list_file, batch_size, image_transform,
                 shuffle=True, delimiter=' ', image_folder=None, capacity=10):
        self.img_list_file = img_list_file
        self.queue = Queue(capacity)
        self.batch_size = batch_size
        self.image_transform = image_transform
        self.shuffle = shuffle
        self.delimiter = delimiter
        self.image_folder = image_folder
        self.stop = False
        self.p = None
        with open(img_list_file, 'r') as fd:
            self.num_samples = len(fd.readlines())

    def start(self):
        self.p = Process(target=self.run)
        self.p.start()
        return

    def __next__(self):
        assert self.p is not None, 'call start before next'
        while self.queue.empty():
            time.sleep(0.1)
        x, y = self.queue.get()  # dequeue one mini-batch
        return x, y

    def stop(self):
        # NOTE: __init__ assigns the instance attribute ``self.stop = False``,
        # which shadows this method, so call end() directly to stop iteration.
        self.end()

    def end(self):
        if self.p is not None:
            self.stop = True
            time.sleep(0.1)
            self.p.terminate()

    def run(self):
        img_list = []
        is_label_index = True
        for line in open(self.img_list_file, 'r'):
            item = line.strip('\n').split(self.delimiter)
            if len(item) < 2:
                is_label_index = False
                img_list.append((item[0].strip(), None))
            else:
                if not item[1].strip().isdigit():
                    # the meta info is not label index
                    is_label_index = False
                img_list.append((item[0].strip(), item[1].strip()))
        index = 0  # index for the image
        if self.shuffle:
            random.shuffle(img_list)
        while not self.stop:
            if not self.queue.full():
                x, y = [], []
                i = 0
                while i < self.batch_size:
                    img_path, img_meta = img_list[index]
                    aug_images = self.image_transform(
                            os.path.join(self.image_folder, img_path))
                    assert i + len(aug_images) <= self.batch_size, \
                        'too many images (%d) in a batch (%d)' % \
                        (i + len(aug_images), self.batch_size)
                    for img in aug_images:
                        ary = np.asarray(img.convert('RGB'), dtype=np.float32)
                        x.append(ary.transpose(2, 0, 1))
                        if is_label_index:
                            y.append(int(img_meta))
                        else:
                            y.append(img_meta)
                        i += 1
                    index += 1
                    if index == self.num_samples:
                        index = 0  # reset to the first image
                        if self.shuffle:
                            random.shuffle(img_list)
                # enqueue one mini-batch
                if is_label_index:
                    self.queue.put((np.asarray(x), np.asarray(y, dtype=np.int32)))
                else:
                    self.queue.put((np.asarray(x), y))
            else:
                time.sleep(0.1)
        return
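A minimal, hypothetical usage sketch for the iterator above; 'train.txt', the images/ folder and load_img are placeholders rather than part of the SINGA code:

def load_img(path):
    # placeholder augmentation: return the image unchanged as a one-element list
    from PIL import Image
    return [Image.open(path)]

data = ImageBatchIter('train.txt', batch_size=32,
                      image_transform=load_img,
                      shuffle=True, delimiter=' ',
                      image_folder='images/', capacity=10)
data.start()
x, y = next(data)   # x: float32 array of CHW images, y: labels or meta_info
data.end()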
コード例 #52
0
ファイル: data.py プロジェクト: mrqc/incubator-singa
class ImageBatchIter:
    '''Utility for iterating over an image dataset to get mini-batches.

    Args:
        img_list_file(str): name of the file containing image meta data; each
                            line consists of image_path_suffix delimiter label
        batch_size(int): num of samples in one mini-batch
        image_transform: a function for image augmentation; it accepts the full
                        image path and outputs a list of augmented images.
        shuffle(boolean): True for shuffling images in the list
        delimiter(char): delimiter between image_path_suffix and label, e.g.,
                         space or comma
        image_folder(str): prefix of the image path
        capacity(int): the max num of mini-batches in the internal queue.
    '''

    def __init__(self, img_list_file, batch_size, image_transform,
                 shuffle=True, delimiter=' ', image_folder=None, capacity=10):
        self.img_list_file = img_list_file
        self.queue = Queue(capacity)
        self.batch_size = batch_size
        self.image_transform = image_transform
        self.shuffle = shuffle
        self.delimiter = delimiter
        self.image_folder = image_folder
        self.stop = False
        self.p = None
        with open(img_list_file, 'r') as fd:
            self.num_samples = len(fd.readlines())

    def start(self):
        self.p = Process(target=self.run)
        self.p.start()
        return

    def next(self):
        assert self.p is not None, 'call start before next'
        while self.queue.empty():
            time.sleep(0.1)
        x, y = self.queue.get()  # dequeue one mini-batch
        return x, y

    def end(self):
        if self.p is not None:
            self.stop = True
            time.sleep(0.1)
            self.p.terminate()

    def run(self):
        img_list = []
        for line in open(self.img_list_file, 'r'):
            item = line.split(self.delimiter)
            img_path = item[0]
            img_label = int(item[1])
            img_list.append((img_label, img_path))
        index = 0  # index for the image
        while not self.stop:
            if index == 0 and self.shuffle:
                random.shuffle(img_list)
            if not self.queue.full():
                x = []
                y = np.empty(self.batch_size, dtype=np.int32)
                i = 0
                while i < self.batch_size:
                    img_label, img_path = img_list[index]
                    aug_images = self.image_transform(
                            os.path.join(self.image_folder, img_path))
                    assert i + len(aug_images) <= self.batch_size, \
                        'too many images (%d) in a batch (%d)' % \
                        (i + len(aug_images), self.batch_size)
                    for img in aug_images:
                        ary = np.asarray(img.convert('RGB'), dtype=np.float32)
                        x.append(ary.transpose(2, 0, 1))
                        y[i] = img_label
                        i += 1
                    index += 1
                    if index == self.num_samples:
                        index = 0  # reset to the first image
                # enqueue one mini-batch
                self.queue.put((np.asarray(x), y))
            else:
                time.sleep(0.1)
        return
コード例 #53
0
class NotificationPusher:
	useSense = False
	def __init__(self, nrConsumers=1, useSense = False, certfile = None, keyfile=None, sandbox = True):
		self.certfile = certfile
		self.keyfile = keyfile
		self.sandbox = sandbox

		#self.pool = Pool(10)
		self.queue = Queue(1000)
		self.processes = []
		if useSense:
			runner = NotificationPusher.runSense
		else:
			runner = NotificationPusher.runSelf
			self.apns = APNs(use_sandbox=self.sandbox, cert_file=self.certfile, key_file=self.keyfile)
			process = Process(name="apns feedback daemon", target=NotificationPusher.runFeedbackCheck, args=(self,))
			process.daemon = True
			process.start()
			self.processes.append(process)
			
		for i in range(nrConsumers):
			process = Process(name="apns sender daemon", target=runner, args=(self,))
			process.daemon = True
			process.start()
			self.processes.append(process)

	def runSense (self):
		while True:
			try:
				(deviceId, userMessage, payload) = self.queue.get()
				notification = {"device_type": "ios",
						"device_id": deviceId,
						"message":{
							"badge":1,
							"sound":"default",
							"alert":userMessage
							}
					}
				NotificationPusher.request('POST', '/jump/push', json.dumps(notification));
			except:
				logger.exception("Exception trying to send push notification via Sense")

	def runSelf (self):
		while True:
			try:
				(deviceId, userMessage, payload) = self.queue.get()
				payload = Payload(alert=userMessage, sound="default", badge=1)
				self.apns.gateway_server.send_notification(deviceId, payload)
			except:
				#Prepare for reconnect
				self.apns._gateway_server = None
				retrySucceed = False
				try:
					self.apns.gateway_server.send_notification(deviceId, payload)
					retrySucceed = True
				except:
					self.apns._gateway_server = None
				#log exception
				logger.exception("{}: Exception trying to send push notification. RetrySucceed={}".format(self.certfile, retrySucceed))


	def runFeedbackCheck (self):
		while True:
			try:
				for (timestamp, devicetoken) in self.apns.feedback_server.items():
					logger.info("{}:got feedback from apple push notification service: ({},{})".format(self.certfile, timestamp, devicetoken))
				time.sleep(5 * 60)
			except:
				#prepare for reconnect
				self.apns._feedback_server = None
				#log exception
				logger.exception("{}: Exception trying to get apple push notification feedback".format(self.certfile))

	@staticmethod
	def request(method,action,body):
		#url = "localhost:5000"
		url = "dev.sense-os.nl"
		headers = {"Content-type": "application/json"}
		conn = httplib.HTTPConnection(url)
		conn.request(method, action, body, headers)
		response = conn.getresponse()
		responseBody = response.read()
		if verbose:
			print "#"*80
			print '{}{}'.format(url,action)
			print response.status, response.reason, responseBody
			print "#"*80

		return responseBody

	def sendNotification(self, deviceId, userMessage, payload=None):
		if self.queue.full():
			logger.critical("{}: notification queue is full. {} items.".format(self.certfile, self.queue.qsize()))
		elif self.queue.qsize() >= WARN_THRESHOLD:
			logger.warning("{}: {} items in notification queue".format(self.certfile, self.queue.qsize()));
			
		self.queue.put((deviceId, userMessage, payload))
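A minimal, hypothetical usage sketch for the pusher above; the certificate paths and device token are placeholders:

pusher = NotificationPusher(nrConsumers=2, useSense=False,
                            certfile='apns-cert.pem', keyfile='apns-key.pem',
                            sandbox=True)
pusher.sendNotification('0f744707deadbeef', 'You have a new message')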
コード例 #54
0
ファイル: workerprocess.py プロジェクト: Ryusoru/DMA-3DPSP
class WorkerProcess(Process):
	def __init__(self, id, config, sequence, hist_obj, results_path, log_id):
		Process.__init__(self)
		self.id = id
		self.config = config
		self.sequence = sequence
		self.hist_obj = hist_obj
		self.agent = Agent(self.id, config, sequence)
		self.results_path = results_path
		self.log_id = log_id
		self.leader_send = None
		self.leader_recv = None
		self.support_send = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.support_recv = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.root_div_send = None
		self.leader_div_send = None
		self.agent_div_recv = [None for i in range(1, self.config.num_agents)] if self.agent.id_leader == None else None
		self.support_div_recv = [None for i in range(1, self.config.num_sup+1)] if self.agent.id_supporters else None
		self.leader_reset_send = None
		self.leader_reset_recv = None
		self.support_reset_send = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.support_reset_recv = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.event_restart = Event()
		self.stop_event = Event()
		self.support_stop_event = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.energy_number = Queue(1)
		self.support_energy_number = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
	
	def select_rand_solution(self, solutions):
		index = 0
		counter_sol = 0
		for sol in solutions:
			if sol != None:
				counter_sol += 1
		counter_sol -= 1 
		index = random.randint(0, counter_sol)
		return index
	
	def fitness_roulette_selection(self, solutions):
		fitness_last = 0
		fitness_total = 0
		fitness_acum = 0
		index = 0
		selection = random.uniform(0, 1)
		for sol in solutions:
			if sol != None:
				fitness_last = sol.energy_value
		for sol in solutions:
			if sol != None:
				fitness_total += fitness_last - sol.energy_value
		if fitness_total != 0:
			for sol in solutions:
				if sol != None:
					fitness_acum += fitness_last - sol.energy_value
					prob = fitness_acum / fitness_total
					if selection <= prob:
						break
					index += 1
		return index
	
	def send_solution(self, solution, queue):
		time_send_start = datetime.datetime.now()
		queue.put(copy.deepcopy(solution))
		self.agent.trx_send += 1
		self.agent.time_send += datetime.datetime.now() - time_send_start
	
	def receive_solution(self, queue, is_leader):
		time_receive_start = datetime.datetime.now()
		solution = queue.get()
		if is_leader:
			self.agent.update(solution)
		else:
			index = 0
			for sol in solution:
				if sol != None:
					self.agent.leader_pockets[index] = copy.deepcopy(sol)
				else:
					break
				index += 1
		self.agent.trx_receive += 1
		self.agent.time_receive += datetime.datetime.now() - time_receive_start
	
	# For the pickles, do not forget to add pickle.loads in agent 0 during the reset
	def send_solution_pickle(self, solution, queue):
		time_send_start = datetime.datetime.now()
		buff = pickle.dumps(solution, 2)
		queue.put(buff)
		self.agent.trx_send += 1
		self.agent.time_send += datetime.datetime.now() - time_send_start
	
	def receive_solution_pickle(self, queue, is_leader):
		time_receive_start = datetime.datetime.now()
		buff = queue.get()
		solution = pickle.loads(buff)
		self.agent.trx_receive += 1
		self.agent.time_receive += datetime.datetime.now() - time_receive_start
		if is_leader:
			self.agent.update(solution)
		else:
			index = 0
			for sol in solution:
				if sol != None:
					self.agent.leader_pockets[index] = copy.deepcopy(sol)
				else:
					break
				index += 1
	
	def save_results(self):
		if not os.path.exists(self.results_path):
			try:
				os.makedirs(self.results_path)
			except:
				pass
		
		if self.agent.id_leader == None:
			fout = open('%s/run-summary.txt' % (self.results_path), 'w')
			
			print 'Parameters'
			fout.write('Parameters\n')
			print '--- pockets: %d' % (self.config.num_pockets)
			fout.write('--- pockets: %d\n' % (self.config.num_pockets))
			print '--- agents: %d' % (self.config.num_agents)
			fout.write('--- agents: %d\n' % (self.config.num_agents))
			print '--- supporters per leader: %d' % (self.config.num_sup)
			fout.write('--- supporters per leader: %d\n' % (self.config.num_sup))
			print '--- do reset: %s' % (str(self.config.if_reset))
			fout.write('--- do reset: %s\n' % (str(self.config.if_reset)))
			print '--- prob radius: %f' % (self.hist_obj.prob_radius)
			fout.write('--- prob radius: %f\n' % (self.hist_obj.prob_radius))
			print '--- prob of ls: %f' % (self.config.test_ls_prob)
			fout.write('--- prob of ls: %f\n' % (self.config.test_ls_prob))
			print '--- simulated annealing decrease factor: %f' % (self.config.test_ls_fact)
			fout.write('--- simulated annealing decrease factor: %f\n' % (self.config.test_ls_fact))
			print '--- prob of jump before ls: %f' % (self.config.test_jump_prob)
			fout.write('--- prob of jump before ls: %f\n' % (self.config.test_jump_prob))
			print '--- jump decrease factor: %f' % (self.config.test_jump_fact)
			fout.write('--- jump decrease factor: %f\n' % (self.config.test_jump_fact))
			print '--- initial temperature for simulated annealing: %d' % (self.config.test_temp_init)
			fout.write('--- initial temperature for simulated annealing: %d\n' % (self.config.test_temp_init))
			print '--- initial max jump distance: %f' % (self.config.test_jump_dist)
			fout.write('--- initial max jump distance: %f\n' % (self.config.test_jump_dist))
			print '--- generations without improvements: %d' % (self.config.test_noimprove)
			fout.write('--- generations without improvements: %d\n' % (self.config.test_noimprove))
			print '--- prob of crossover: %f' % (self.config.crossover_prob)
			fout.write('--- prob of crossover: %f\n' % (self.config.crossover_prob))
			
			fout.close()
			
			for i in range(0, self.config.num_pockets):
				if self.agent.pockets[i] != None:
					self.agent.pockets[i].pose.dump_pdb('%s/pocket-%02d.pdb' % (self.results_path, i))
		
		fout = open('%s/log-agent-%02d.txt' % (self.results_path, self.id), 'w')

		print '\n%s' % (self.agent)
		fout.write('%s\n' % (self.agent))
		print 'Total generation of agent_%02d: %d' % (self.id, self.agent.generation)
		fout.write('Total generation of agent_%02d: %d\n' % (self.id, self.agent.generation))
		print 'Total restarts of agent_%02d: %d' % (self.id, self.agent.restarts)
		fout.write('Total restarts of agent_%02d: %d\n' % (self.id, self.agent.restarts))
		print 'Total time LocalSearch of agent_%02d: %s' % (self.id, str(self.agent.time_ls))
		fout.write( 'Total time LocalSearch of agent_%02d: %s\n' % (self.id, str(self.agent.time_ls)))
		print 'Total time Diversity calculations of agent_%02d: %s' % (self.id, str(self.agent.time_div))
		fout.write( 'Total time Diversity calculations of agent_%02d: %s\n' % (self.id, str(self.agent.time_div)))
		print 'Total time SEND of agent_%02d: %s' % (self.id, str(self.agent.time_send))
		fout.write( 'Total time SEND of agent_%02d: %s\n' % (self.id, str(self.agent.time_send)))
		print 'Total transactions SEND of agent_%02d: %s' % (self.id, str(self.agent.trx_send))
		fout.write( 'Total transactions SEND of agent_%02d: %s\n' % (self.id, str(self.agent.trx_send)))
		print 'Total time RECEIVE of agent_%02d: %s' % (self.id, str(self.agent.time_receive))
		fout.write( 'Total time RECEIVE of agent_%02d: %s\n' % (self.id, str(self.agent.time_receive)))
		print 'Total transactions RECEIVE of agent_%02d: %s\n' % (self.id, str(self.agent.trx_receive))
		fout.write( 'Total transactions RECEIVE of agent_%02d: %s\n\n' % (self.id, str(self.agent.trx_receive)))
		
		fout.close()
		self.agent.status_write('%s/log-agent-%02d.txt' % (self.results_path, self.id))
	
	
	def make_server_manager(self, port, authkey):
		queue_send = Queue(1)
		queue_recv = Queue()
		queue_div_recv = Queue(1)
		queue_reset_send = Queue(1)
		queue_reset_recv = Queue(1)
		stop_event = Event()
		energy_number = Queue(1)
		
		class ServerManager(SyncManager):
			pass

		ServerManager.register('get_queue_send', callable=lambda: queue_send)
		ServerManager.register('get_queue_recv', callable=lambda: queue_recv)
		ServerManager.register('get_queue_div_recv', callable=lambda: queue_div_recv)
		ServerManager.register('get_queue_reset_send', callable=lambda: queue_reset_send)
		ServerManager.register('get_queue_reset_recv', callable=lambda: queue_reset_recv)
		ServerManager.register('get_stop_event', callable=lambda: stop_event)
		ServerManager.register('get_energy_number', callable=lambda: energy_number)

		manager = ServerManager(address=('', port), authkey=authkey)
		manager.start()
		print 'Agent %d server started at port %d' % (self.id, port)
		return manager
	
	def make_div_server_manager(self, port, authkey):
		queue_div_recv = Queue()
		class ServerManager(SyncManager):
			pass
		ServerManager.register('get_queue_div_recv', callable=lambda: queue_div_recv)
		manager = ServerManager(address=('', port), authkey=authkey)
		manager.start()
		print 'Agent %d div server started at port %d' % (self.id, port)
		return manager
	
	
	def make_client_manager(self, host, port, authkey):
		class ClientManager(SyncManager):
			pass

		ClientManager.register('get_queue_send')
		ClientManager.register('get_queue_recv')
		ClientManager.register('get_queue_div_recv')
		ClientManager.register('get_queue_reset_send')
		ClientManager.register('get_queue_reset_recv')
		ClientManager.register('get_stop_event')
		ClientManager.register('get_energy_number')

		manager = ClientManager(address=(host, port), authkey=authkey)
		manager.connect()
		print 'Agent %d client connected to %s at port %d ' % (self.id, host, port)
		return manager
	
	def make_div_client_manager(self, host, port, authkey):
		class ClientManager(SyncManager):
			pass
		ClientManager.register('get_queue_div_recv')
		manager = ClientManager(address=(host, port), authkey=authkey)
		manager.connect()
		print 'Agent %d div client connected to %s at port %d ' % (self.id, host, port)
		return manager
	
	def run_servers(self):
		servers = [None for i in range(0, self.config.num_sup)]
		div_servers = [None for i in range(1, self.config.num_agents)]
		
		if self.agent.id_leader == None:
			for i in range(1, self.config.num_agents):
				port = self.config.root_hosts[i][1]
				div_servers[i-1] = self.make_div_server_manager(port, '')
				self.agent_div_recv[i-1] = div_servers[i-1].get_queue_div_recv()
			servers += div_servers
		
		for i in range(0, self.config.num_sup):
			host = self.config.hosts[self.agent.id_supporters[i]][0]
			port = self.config.hosts[self.agent.id_supporters[i]][1]
			path = self.config.hosts[self.agent.id_supporters[i]][2]
			servers[i] = self.make_server_manager(port, '')
			self.support_send[i] = servers[i].get_queue_send()
			self.support_recv[i] = servers[i].get_queue_recv()
			self.support_div_recv[i] = servers[i].get_queue_div_recv()
			self.support_reset_send[i] = servers[i].get_queue_reset_send()
			self.support_reset_recv[i] = servers[i].get_queue_reset_recv()
			self.support_stop_event[i] = servers[i].get_stop_event()
			self.support_energy_number[i] = servers[i].get_energy_number()
			
			if not os.path.exists(self.config.logs_path):
				os.makedirs(self.config.logs_path)
			
			argv = (str(self.config.protein) + ' ' + str(self.config.num_levels) + ' ' + str(self.config.num_sup) + ' ' + str(self.config.max_agents) + ' ' +
					str(self.config.num_pockets) + ' ' + str(self.config.if_reset) + ' ' + str(self.config.test_noimprove) + ' ' + str(self.config.score_weight) + ' ' +
					str(self.config.sasa_weight) + ' ' + str(self.config.energy_limit) + ' ' + str(self.agent.id_supporters[i]))
			cmd = 'python memetic_parallel.py %s %d > %s/memetic_parallel_%03d_agent-%02d.log 2>&1' % (argv, self.log_id, self.config.logs_path, self.log_id, self.agent.id_supporters[i])
			subprocess.Popen(['ssh', host, 'cd ' + path + ' && ' + cmd], stdin = None, stdout = None, stderr = None)
		
		return servers
	
	def run_client(self):
		host = self.config.hosts[self.agent.id_leader][0]
		port = self.config.hosts[self.id][1]
		root_host = self.config.root_hosts[0][0]
		root_port = self.config.root_hosts[self.id][1]
		client = self.make_client_manager(host, port, '')
		root_client = self.make_div_client_manager(root_host, root_port, '')
		self.leader_send = client.get_queue_recv()
		self.leader_recv = client.get_queue_send()
		self.root_div_send = root_client.get_queue_div_recv()
		self.leader_div_send = client.get_queue_div_recv()
		self.leader_reset_send = client.get_queue_reset_recv()
		self.leader_reset_recv = client.get_queue_reset_send()
		self.stop_event = client.get_stop_event()
		self.energy_number = client.get_energy_number()
	
	def run(self):
		if self.agent.id_leader != None:
			self.run_client()
		
		if self.agent.id_supporters:
			servers = self.run_servers()
		
		jump_radius_aux = self.config.test_jump_dist
		self.agent.current.init_solution(self.hist_obj)
		self.agent.update()
		
		print 'WorkerProcess %d: \n%s' % (self.id, self.agent)
		
		start_process_time = datetime.datetime.now()
		self.agent.generation = 1
		
		best_energy = self.agent.pockets[0].energy_value
		gens_without_improve = 0
		gens_convergence = self.config.test_noimprove
		gens_start = 0
		restart_successed = True
		restarts_failed = 0
		energy_calls = self.agent.current.energy_calls
		support_energy_calls = [0 for i in range(0, self.config.num_sup)]
		self.agent.status_log_append(datetime.datetime.now() - start_process_time, energy_calls)
		
		
		while(self.stop_event.is_set() == False):
			
			# Crossover is not allowed to execute on agent 0
			if self.agent.id_leader != None:
				if self.agent.leader_pockets[0] != None:
					index_pocket_leader_agent = self.fitness_roulette_selection(self.agent.leader_pockets)
					index_pocket_self_agent = self.select_rand_solution(self.agent.pockets)
					self.agent.crossover(self.agent.leader_pockets[index_pocket_leader_agent], self.agent.pockets[index_pocket_self_agent], self.config.crossover_prob)
			else:
				index_pocket_self_agent = self.select_rand_solution(self.agent.pockets)
				self.agent.current = copy.deepcopy(self.agent.pockets[index_pocket_self_agent])
			
			# Local search
			time_ls_start = datetime.datetime.now()
			self.agent.simulated_annealing(self.config.ls_prob_ss, self.config.test_ls_fact, self.config.test_jump_prob, jump_radius_aux, self.config.test_temp_init, self.hist_obj)
			self.agent.time_ls += datetime.datetime.now() - time_ls_start
			jump_radius_aux = jump_radius_aux * self.config.test_jump_fact
			
			updated = self.agent.update()
			
			# Update pockets with supporter data
			if self.agent.id_supporters:
				for i in range(0, self.config.num_sup):
					while not self.support_recv[i].empty():
						self.receive_solution_pickle(self.support_recv[i], True)
						print '>> WorkerProcess %d receive a pocket from supporter %d, pocket list: %s' % (self.id, self.agent.id_supporters[i], self.agent.pockets)
			
			# Update pocket_leader with leader data
			if self.agent.id_leader != None:
				if not self.leader_recv.empty():
					self.receive_solution_pickle(self.leader_recv, False)
					print '>> WorkerProcess %d receive a list of pockets from leader %d' % (self.id, self.agent.id_leader)
					
			if updated or self.agent.update():
				# Send pocket_leader with leader data
				if self.agent.id_supporters:
					for i in range(0, self.config.num_sup):
						if not self.support_send[i].full():
							print '> WorkerProcess %d send a list of pockets to supporter %d' % (self.id, self.agent.id_supporters[i])
							self.send_solution_pickle(self.agent.pockets, self.support_send[i])

				# Send pockets with supporter data
				if self.agent.id_leader != None:
					if self.agent.pockets[0].energy_value < best_energy:
						if not self.leader_send.full():
							print '> WorkerProcess %d send a pocket to leader %d with energy: %d' % (self.id, self.agent.id_leader, self.agent.pockets[0].energy_value)
							self.send_solution_pickle(self.agent.pockets[0], self.leader_send)
			
			if self.config.calculate_div_density:
				# Diversity density calculations
				time_div_start = datetime.datetime.now()

				if self.agent.id_leader == None:
					for i in range(0, self.config.num_agents-1):
						if not self.agent_div_recv[i].empty():
							buff = self.agent_div_recv[i].get()
							agent_pockets = pickle.loads(buff)
							j = 0
							for p in agent_pockets:
								if p != None:
									self.agent.population_pockets[i][j] = copy.deepcopy(p)
								else:
									break
								j += 1

					if self.agent.id_supporters:
						for i in range(0, self.config.num_sup):
							if not self.support_div_recv[i].empty():
								buff = self.support_div_recv[i].get()
								supporter_pockets = pickle.loads(buff)
								j = 0
								for p in supporter_pockets:
									if p != None:
										self.agent.supporter_pockets[i][j] = copy.deepcopy(p)
									else:
										break
									j += 1
				else:
					if not self.root_div_send.full():
						buff = pickle.dumps(self.agent.pockets, 2)
						self.root_div_send.put(buff)

					if not self.leader_div_send.full():
						buff = pickle.dumps(self.agent.pockets, 2)
						self.leader_div_send.put(buff)

					if self.agent.id_supporters:
						for i in range(0, self.config.num_sup):
							if not self.support_div_recv[i].empty():
								buff = self.support_div_recv[i].get()
								supporter_pockets = pickle.loads(buff)
								j = 0
								for p in supporter_pockets:
									if p != None:
										self.agent.supporter_pockets[i][j] = copy.deepcopy(p)
									else:
										break
									j += 1

				self.agent.calculate_densities()
				self.agent.time_div += datetime.datetime.now() - time_div_start

			self.agent.generation += 1
			
			# Reset control
			if self.config.if_reset:
				if self.agent.id_leader == None:
					if self.agent.pockets[0].energy_value == best_energy:
						gens_without_improve += 1
					else:
						gens_without_improve = 0

					if gens_without_improve == gens_convergence:
						if self.agent.id_supporters:
							for i in range(0, self.config.num_sup):
								self.support_reset_send[i].put(0)
							for i in range(0, self.config.num_sup):
								last_solution = pickle.loads(self.support_reset_recv[i].get())
								if last_solution.energy_value < best_energy:
									restarts_failed += 1
									restart_successed = False
									self.agent.update(last_solution)
									best_energy = self.agent.pockets[0].energy_value
									gens_without_improve = 0
						if restart_successed:
							print '\n***Restart successed***\n'
							self.event_restart.set()
							if self.agent.id_supporters:
								for i in range(0, self.config.num_sup):
									self.support_reset_send[i].put(True)
						else:
							print '\n***Restart failed: %d***\n' % restarts_failed
							if self.agent.id_supporters:
								for i in range(0, self.config.num_sup):
									self.support_reset_send[i].put(False)
						restart_successed = True
				else:
					if not self.leader_reset_recv.empty():
						self.leader_reset_recv.get()
						if self.agent.id_supporters:
							for i in range(0, self.config.num_sup):
								self.support_reset_send[i].put(0)
							for i in range(0, self.config.num_sup):
								self.receive_solution_pickle(self.support_reset_recv[i], True)
						self.send_solution_pickle(self.agent.pockets[0], self.leader_reset_send)
						restart_successed = self.leader_reset_recv.get()
						if restart_successed:
							self.event_restart.set()
							if self.agent.id_supporters:
								for i in range(0, self.config.num_sup):
									self.support_reset_send[i].put(True)
						else:
							if self.agent.id_supporters:
								for i in range(0, self.config.num_sup):
									self.support_reset_send[i].put(False)
						restart_successed = True
				
				# Is event restart set?
				if self.event_restart.is_set():
					if self.agent.id_leader == None:
						# Only the root leader can keep the best solution
						self.agent.pockets = [self.agent.pockets[0]] + [None for i in range(1, self.config.num_pockets)]
						self.agent.population_pockets = [[None for i in range(0, self.config.num_pockets)] for i in range(1, self.config.num_agents)]
						for i in range(0, self.config.num_agents-1):
							if not self.agent_div_recv[i].empty():
								self.agent_div_recv[i].get()
					else:
						self.agent.pockets = [None for i in range(0, self.config.num_pockets)]
						self.agent.leader_pockets = [None for i in range(0, self.config.num_pockets)]
						if not self.leader_recv.empty():
							self.leader_recv.get()
					
					if self.agent.id_supporters:
						for i in range(0, self.config.num_sup):
							while not self.support_recv[i].empty():
								self.support_recv[i].get()
						
						self.agent.supporter_pockets = [[None for i in range(0, self.config.num_pockets)] for i in range(1, self.config.num_sup+1)]
						for i in range(0, self.config.num_sup):
							if not self.support_div_recv[i].empty():
								self.support_div_recv[i].get()
						
					self.agent.restarts += 1

					print 'RESTARTING %3d - WorkerProcess %2d - %s' % (self.agent.restarts, self.id, self.agent)
					self.agent.current.init_solution(self.hist_obj)
					self.agent.update()
					jump_radius_aux = self.config.test_jump_dist
					gens_convergence = self.config.test_noimprove + self.agent.generation - gens_convergence - gens_start
					gens_start = self.agent.generation
					gens_without_improve = 0
					self.event_restart.clear()
					print 'RESTARTED %3d - WorkerProcess %2d - %s' % (self.agent.restarts, self.id, self.agent)
			
			
			energy_calls = self.agent.current.energy_calls
			
			if self.agent.id_supporters:
				for i in range(0, self.config.num_sup):
					if not self.support_energy_number[i].empty():
						support_energy_calls[i] = self.support_energy_number[i].get()
					energy_calls += support_energy_calls[i]
			
			if not self.energy_number.full():
				self.energy_number.put_nowait(energy_calls)
			
			self.agent.status_log_append(datetime.datetime.now() - start_process_time, energy_calls)
			
			if self.agent.id_leader == None:
				if energy_calls > self.config.energy_limit:
					self.stop_event.set()
			
			best_energy = self.agent.pockets[0].energy_value
		
		if self.agent.id_supporters:
			for i in range(0, self.config.num_sup):
				self.support_stop_event[i].set()
		
		self.save_results()
		
		if self.agent.id_supporters:
			for i in range(0, self.config.num_sup):
				self.support_reset_recv[i].get()
				servers[i].shutdown()
		
		if self.agent.id_leader != None:
			self.leader_reset_send.put(0)
		
		if self.agent.id_leader == None:
			for i in range(self.config.num_sup, (self.config.num_agents + self.config.num_sup - 1)):
				servers[i].shutdown()
		
		print '\n************ WorkerProcess %d done ************\n' % (self.id)
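The make_server_manager / make_client_manager pair above shares queues and events between hosts through multiprocessing.managers.SyncManager. A self-contained sketch of that pattern (host, port and authkey are placeholders; like the original it registers lambdas, so it assumes a fork-based start method):

from multiprocessing.managers import SyncManager
try:
    from queue import Queue          # Python 3
except ImportError:
    from Queue import Queue          # Python 2

def make_server(port, authkey=b'secret'):
    queue_send = Queue(1)            # the served object lives in the manager's server process

    class ServerManager(SyncManager):
        pass

    ServerManager.register('get_queue_send', callable=lambda: queue_send)
    manager = ServerManager(address=('', port), authkey=authkey)
    manager.start()
    return manager

def make_client(host, port, authkey=b'secret'):
    class ClientManager(SyncManager):
        pass

    ClientManager.register('get_queue_send')   # no callable on the client side
    manager = ClientManager(address=(host, port), authkey=authkey)
    manager.connect()
    return manager

if __name__ == '__main__':
    server = make_server(50000)
    client = make_client('127.0.0.1', 50000)
    client.get_queue_send().put('hello')
    print(server.get_queue_send().get())       # -> hello
    server.shutdown()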
コード例 #55
0
ファイル: TaskRunner.py プロジェクト: oencoding/liveDVR
class TaskRunner:

    entry_regex = '^([01]_\w{8})_([01]_\w{8})_(\d+)'
    pattern = re.compile(entry_regex)

    @staticmethod
    def match(directory_name):
        return TaskRunner.pattern.match(directory_name)

    @staticmethod
    def get_param(directory_name):
        m = re.search(TaskRunner.entry_regex, directory_name)
        entry_id = m.group(1)
        recorded_id = m.group(2)
        duration = m.group(3)
        param = {'entry_id': entry_id, 'directory': directory_name, 'recorded_id': recorded_id,
                 'duration': duration}

        return param

    def __init__(self, task, number_of_processes, output_directory, max_task_count, skipped_task_output):
        self.number_of_processes = number_of_processes
        self.task = task
        self.task_name = task.__name__
        self.polling_interval = get_config('polling_interval_sec', 'int')
        base_directory = get_config('recording_base_dir')
        hostname = gethostname()
        self.failed_tasks_handling_interval = get_config('failed_tasks_handling_interval', 'int')*60  # in minutes
        self.failed_tasks_max_retries = get_config('failed_tasks_max_retries')
        self.task_directory = os.path.join(base_directory, hostname, self.task_name)
        self.error_directory = os.path.join(base_directory, 'error')
        self.failed_tasks_directory = os.path.join(base_directory, hostname, self.task_name, 'failed')
        self.web_incoming_directory = os.path.join(base_directory, 'incoming')
        self.input_directory = os.path.join(base_directory, hostname, self.task_name, 'incoming')
        self.working_directory = os.path.join(base_directory, hostname, self.task_name, 'processing')
        self.output_directory = output_directory
        self.task_queue_size = max_task_count
        self.task_queue = Queue(max_task_count)
        self.logger = logging.getLogger(__name__+'-'+self.task_name)
        self.skipped_task_output = skipped_task_output
        self.on_startup()

    def on_startup(self):

        self.logger.info("onStartUp: %s", self.task_name)
        try:
            if not os.path.exists(self.task_directory):  # In case directory not exist
                os.makedirs(self.task_directory)

            if not os.path.exists(self.failed_tasks_directory):  # In case directory not exist
                os.makedirs(self.failed_tasks_directory)

            if not os.path.exists(self.input_directory):  # In case directory not exist
                os.symlink(self.web_incoming_directory, self.input_directory)

            if not os.path.exists(self.working_directory):  # In case directory not exist
                os.makedirs(self.working_directory)

            if not os.path.exists(self.output_directory):  # In case directory not exist
                os.makedirs(self.output_directory)

            if not os.path.exists(self.error_directory):  # In case directory not exist
                os.makedirs(self.error_directory)

            t = threading.Thread(target=self.schedule_job)
            t.daemon = True
            t.start()

        except os.error as e:
            self.logger.fatal("Error %s \n %s", str(e), traceback.format_exc())

    def keep_alive_message(self):
        self.logger.info("Keep alive")

    def schedule_job(self):
        schedule.every().day.at("00:01").do(self.keep_alive_message)

        while 1:
            schedule.run_pending()
            time.sleep(1)

    def move_and_add_to_queue(self, src_dir, queue_name):
        # In order to avoid starvation we need to handle files/directories in the order of creation.
        file_list = self.getSorterFileList(src_dir)

        for directory_name in file_list:
            if self.task_queue.full():
                self.logger.warn(
                    'cannot add tasks to queue. Queue is full!!! (max size {}, num processes {})'.format(
                        self.task_queue_size, self.number_of_processes))
                return
            directory_path = os.path.join(src_dir, directory_name)
            if self.match(directory_name) is not None:
                try:
                    if os.path.isdir(directory_path):
                        param = self.get_param(directory_name)
                        if queue_name == 'incoming':
                            self.reset_retry_count(directory_path)
                        if src_dir != self.working_directory:   # if its not the same directory
                            shutil.move(directory_path, self.working_directory)
                        self.task_queue.put(param, block=False)
                        self.logger.info("[%s-%s] Add unhandled directory %s from %s to the task queue",
                                         param['entry_id'], param['recorded_id'], directory_name, src_dir)
                    else:
                        self.logger.warn("Can't find the content of %s, move it to %s", directory_path,
                                         self.error_directory)
                        self.safe_move(directory_path, self.error_directory)

                except Q.Full:
                        self.logger.warn("Failed to add new task [%s-%s], queue is full!", param['entry_id'], param['recorded_id'])

                except Exception as e:
                        self.logger.error("[%s-%s] Error while try to add task:%s \n %s", param['entry_id'], param['recorded_id'], str(e), traceback.format_exc())

    def move_to_incoming_dir(self, src_dir, dst_dir):
        file_list = os.listdir(src_dir)

        for path in file_list:
            full_path = ""
            try:
                full_path = os.path.join(src_dir, path)
                self.safe_move(full_path, dst_dir)
                self.logger.info("successfully moved [{}] to [{}]".format(full_path, dst_dir))
            except (IOError, OSError) as e:
                if e.errno == 2:  # no such file or directory
                    self.logger.error("Failed to move job from [{}] [{}]. Error: no such file or directory".format(full_path, dst_dir))
                else:
                    self.logger.error("Failed to move job from [{}] to [{}]. Error {} \n {}. Moving to [{}]".format(full_path, dst_dir, str(e), traceback.format_exc(), self.error_directory))
                    self.safe_move(full_path, self.error_directory)



    def work(self, index):
        self.logger.info("Worker %s start working", index)
        while True:
            task_parameter = self.task_queue.get()
            logger_info = task_parameter['entry_id'] + '-' + task_parameter['recorded_id']
            self.logger.info("[%s] Task is performed by %d", logger_info, index)
            try:
                src = os.path.join(self.working_directory, task_parameter['directory'])

                job = self.task(task_parameter, logger_info)  # operate the function task_job, with argument task_parameters
                job.check_stamp()  # raise error if stamp is not valid
                job.run()
                job.check_stamp()
                shutil.move(src, self.output_directory)
                self.logger.info("[{}] Task {} completed, move {} to {}".format(logger_info, self.task_name, src,
                                 self.output_directory))
            except UnequallStampException as e:
                    self.logger.warning("[{}] skip processing, a newer job exits. Move to {}. Mismatch details: {}".format(logger_info, self.skipped_task_output, str(e)), exc_info=True)
                    self.safe_move(src, self.skipped_task_output)
            except Exception as e:
                self.logger.error("[{}] Failed to perform task :{}".format(logger_info, str(e)), exc_info=True)
                retries = self.get_retry_count(src)
                try:
                    if retries > 0:
                        self.logger.info("[%s] Job %s on entry %s has %s retries, move it to failed task directory ",
                                         logger_info, self.task_name, task_parameter['directory'], retries)
                        self.safe_move(src, self.failed_tasks_directory)
                    else:
                        self.logger.fatal("[%s] Job %s on entry %s has no more retries or failed to get it, move entry to "
                                      "failed task directory ", logger_info, self.task_name, task_parameter['directory'])
                        self.safe_move(src, self.error_directory)
                except Exception as e:
                    self.logger.fatal("[%s]  Failed to handle failure task %s \n %s", logger_info, str(e)
                                    , traceback.format_exc())


    def get_retry_count(self, src):
        try:
            retries_file_path = os.path.join(src, 'retries')
            if not os.path.exists(retries_file_path):
                with open(retries_file_path, "w") as retries_file:
                    retries_file.write(self.failed_tasks_max_retries)
                return self.failed_tasks_max_retries
            else:
                with open(retries_file_path, "r+") as retries_file:
                    retries = retries_file.read()
                    retries = int(retries) - 1
                    retries_file.seek(0)
                    retries_file.truncate()
                    retries_file.write(str(retries))

                return retries
        except Exception as e:
            self.logger.error("Failed to get retry count for %s: %s \n %s", src, str(e), traceback.format_exc())
            return 0

    def add_new_task_handler(self):
        thread = Timer(self.polling_interval, self.add_new_task_handler)
        thread.daemon = True
        thread.start()
        self.move_and_add_to_queue(self.input_directory, 'incoming')

    def failed_task_handler(self):
        thread = Timer(self.failed_tasks_handling_interval, self.failed_task_handler)
        thread.daemon = True
        thread.start()
        self.move_and_add_to_queue(self.failed_tasks_directory, 'failed')

    def start(self):
        workers = []  # ensure the finally clause can always return a list
        try:
            self.logger.info("Starting %d workers", self.number_of_processes)
            self.move_to_incoming_dir(self.working_directory, self.input_directory)
            self.add_new_task_handler()
            self.failed_task_handler()
            workers = [Process(target=self.work, args=(i,)) for i in xrange(1, self.number_of_processes+1)]
            for w in workers:
                w.start()

        except Exception as e:
            self.logger.fatal("Failed to start task runner: %s  \n %s ", str(e), traceback.format_exc())
        finally:
            return workers

    def safe_move(self, src, dst):
        try:
            try:
                shutil.move(src, dst)
            except shutil.Error as e:
                file_name = os.path.basename(src)
                new_file_name = ''.join([file_name, '_', str(time.time())])
                new_dest = os.path.join(dst, new_file_name)
                self.logger.error("Failed to move %s into %s, try to move it as %s", src, dst, new_dest)
                shutil.move(src, new_dest)
            except IOError as e:
                if e.errno == 2:  # no such file or directory
                    self.logger.warn("No such file or directory: maybe job was taken by other machine")
                else:
                    raise e

        except Exception as e:
            self.logger.error("Failed to move %s to %s : %s \n %s", src, dst, str(e), traceback.format_exc())

    def getSorterFileList(self, src_dir):
        file_list = os.listdir(src_dir)
        file_list_with_ctime = []
        full_path = ""
        for path in file_list:
            try:
                full_path = os.path.join(src_dir, path)
                dir_update_time = os.stat(full_path).st_ctime
                file_list_with_ctime.append((path, dir_update_time))
            except (IOError, OSError) as e:
                if e.errno == 2:  # no such file or directory
                    self.logger.error("Failed to stat [{}]. Error: no such file or directory. Moving to [{}]".format(full_path, self.error_directory))
                else:
                    self.logger.error("Failed to stat [{}]. Error {} \n {}. Moving to [{}]".format(full_path, str(e), traceback.format_exc(), self.error_directory))
                self.safe_move(full_path, self.error_directory)

        sorted_file_list_with_ctime = sorted(file_list_with_ctime, key=lambda file_data: file_data[1])
        sorted_file_list = []
        for file_data in sorted_file_list_with_ctime:
            sorted_file_list.append(file_data[0])

        return sorted_file_list

    def reset_retry_count(self, src):
        try:
            retries_file_path = os.path.join(src, 'retries')
            if os.path.exists(retries_file_path):
                with open(retries_file_path, "w") as retries_file:
                    retries_file.write(self.failed_tasks_max_retries)
        except Exception as e:
            self.logger.error('Failed to reset retries count for {}: {} \n'.format(src, str(e)), exc_info=True)
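A hypothetical driver for TaskRunner; EchoTask and the paths are placeholders, and the class still expects the project's get_config() settings (recording_base_dir, polling_interval_sec, ...) to be available:

class EchoTask(object):
    # Placeholder task: real job classes implement the same interface.
    def __init__(self, param, logger_info):
        self.param = param
    def check_stamp(self):
        pass                       # real tasks raise UnequallStampException here
    def run(self):
        print(self.param['entry_id'])

runner = TaskRunner(task=EchoTask,
                    number_of_processes=4,
                    output_directory='/var/tmp/liveDVR/output',
                    max_task_count=100,
                    skipped_task_output='/var/tmp/liveDVR/skipped')
workers = runner.start()           # spawns the workers and the polling timers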
コード例 #56
0
ファイル: zdict.py プロジェクト: apua/zdict
def wordlearn():
    wordlist = list(db.items())
    wordlist.sort(key=seckey)
    size = len(wordlist)
    totalcount = 0.0
    right = 0.0
    lookup = Queue(maxsize=5)
    answer = Queue(maxsize=5)
    lookuper = Process(target=answers, args=(lookup, answer))
    lookuper.daemon = True
    lookuper.start()

    if size <= 1:
        print("There must be at least two words needed in the list.")
        exit()

    while 1:
        while not lookup.full():
            k = wordlist[int(random.triangular(0, size - 1, 0))][0]
            k = k.lower()
            lookup.put(k)
        result = answer.get()
        if result is None:
            continue
        k = result.key.text
        if k not in db:
            continue
        s = result.show()
        s = s.replace(k, "####")
        s = s.replace(k.upper(), "####")
        s = s.replace(k[0].swapcase() + k[1:].lower(), "####")
        print(s)
        speak(result)
        word = input("Input :")

        if word == k.lower():
            print("Bingo!")
            right += 1
            db[k] += 1
            if db[k] >= 100:
                db[k] = 100
        else:
            db[k] -= 3
            if db[k] < 0:
                db[k] = 0
            print("WRONG! Correct answer is : ", k)
            try:
                word = input("(d) Delete, (enter) Continue: ")
                if word == "d":
                    del db[k]
                    wordlist = list(db.items())
                    wordlist.sort(key=seckey)
                    size = len(wordlist)
                    if size <= 1:
                        print("There must be at least two words " "needed in the list.")
                        exit()
            except KeyboardInterrupt:
                result(right, totalcount)

        totalcount += 1
        if totalcount % (int(size / 4) + 1) == 0:
            wordlist = list(db.items())
            wordlist.sort(key=seckey)
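The answers() producer that wordlearn() reads from is not shown in this excerpt. A minimal sketch of the request/response prefetch pattern it implements (fake_lookup stands in for the real dictionary lookup):

from multiprocessing import Process, Queue

def fake_lookup(word):
    # placeholder for the real dictionary lookup performed by answers()
    return 'definition of ' + word

def answers(lookup, answer):
    while True:
        answer.put(fake_lookup(lookup.get()))

if __name__ == '__main__':
    lookup = Queue(maxsize=5)
    answer = Queue(maxsize=5)
    worker = Process(target=answers, args=(lookup, answer))
    worker.daemon = True
    worker.start()
    while not lookup.full():        # keep the prefetch queue topped up
        lookup.put('example')
    print(answer.get())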
コード例 #57
0
ファイル: main.py プロジェクト: Nryanicus/giftris
def main():
    #### Initialisation ####
    
    ## get config data
    WIDTH = ctypes.windll.user32.GetSystemMetrics(0)
    HEIGHT = ctypes.windll.user32.GetSystemMetrics(1)
    CONSTS = {"WIDTH":WIDTH,"HEIGHT":HEIGHT}
    with open("config.txt","r") as config:
        for line in config:
            if line[0] == "#" or line == "\n": continue
            key,value = line.split(" ")
            value = value.strip()
            CONSTS[key] = value
            
    ## get gif library
    gif_path = CONSTS['SOURCE_DIRC']
    assert os.path.exists(gif_path), gif_path+" is not an valid directory"
    all_files = os.listdir(gif_path)
    gif_files = []
    for file in all_files:
        if file[-4:] == ".gif":
            gif_files.append(gif_path+"\\"+file)
            
    ## Packer process Setup
    rect_library = {}
    for filename in gif_files:
        rect_library[filename] = Rect(filename)
    
    # Queues
    pack_in = Queue()    # Queue of rects names
    remove_in = Queue()  # Queue of rect names
    pack_out = Queue()   # Queue of Rects
    remove_out = Queue() # Queue of rects names
    
    rect_in = Queue()    # Queue of rects
    gif_out = Queue()    # Queue of gifs
    
    initial_rects = rect_library.keys()
    shuffle(initial_rects)
    packer = RectPackerManager(pack_in, remove_in, pack_out, remove_out, rect_library, initial_rects, CONSTS)
    packer.start()
    
    ## GifLoader process Setup
    loader = GifLoader(rect_in, gif_out, CONSTS)
    loader.start()
    
    ## pygame bootstrappery
    pygame.init()
    # move display window to top left of screen
    os.environ['SDL_VIDEO_WINDOW_POS'] = '0,0'
    screen = pygame.display.set_mode((WIDTH, HEIGHT),pygame.NOFRAME)
    background = pygame.Surface(screen.get_size())
    background = background.convert()
    # pleasant dark grey
    background.fill((20, 20, 20))
    pygame.display.set_caption('Giftris')
    clock = pygame.time.Clock()
    
    active_sprites = pygame.sprite.Group()
    active_gifs = {}
    
    #### Application Loop ####
    dt = 0
    rects_to_add = [] # rects which need to be added to the packer
    added_rects = initial_rects  # rects which have been added, and must not be again
    completed_gifs = [] # gifs whose animation is done and must be removed from the packer
    packed_rects = [] # rects who have been packed and must have their animation created
    loading_gif = None
    loading_func = None
    j = 0
    while True:
        ## Packer Communication
        
        # if packer is empty, refill it
        if packer.empty():
            rects_to_add = rect_library.keys()
            # don't add rects which are still onscreen to the packer, as this 
            # would cause filenames to be ambiguous ids. And be boring
            for rect_name in added_rects:
                if rect_name in rects_to_add:
                    rects_to_add.remove(rect_name)
            shuffle(rects_to_add)
        # if there are any rects have not been sent, send them    
        i = 0
        for rect_name in rects_to_add:
            if not pack_in.full():
                assert not rect_name in added_rects
                pack_in.put_nowait(rect_name)
                added_rects.append(rect_name)
                i += 1
            else: # keep whatever gifs weren't queued for next iteration 
                rects_to_add = rects_to_add[i:]
                break
        else: # all completed gifs got queued, so empty list
            rects_to_add = []
        
        # send completed gif ids to packer 
        i = 0
        for filename in completed_gifs:
            if not remove_in.full():
                remove_in.put_nowait(filename)
                i += 1
            else:
                completed_gifs = completed_gifs[i:]
                break
        else:
            completed_gifs = []
            
        # get removed rects and kill sprites
        removed_rects = []
        while not remove_out.empty():
            rect_name = remove_out.get_nowait()
            removed_rects.append(rect_name)
            added_rects.remove(rect_name)
        for rect_name in removed_rects:
            gif = active_gifs.pop(rect_name)
            gif.kill()
            active_sprites.remove(gif)
        
        ## add new sprites
        # get packed rects from packer
        while not pack_out.empty():
            rect = pack_out.get_nowait()
            packed_rects.append(rect)
            assert not rect.filename in active_gifs, rect.filename
            assert not loading_gif or not rect.filename == loading_gif.filename, rect.filename
        
        # send rects to have it's animation loaded
        i = 0
        for rect in packed_rects:
            if not rect_in.full():
                assert not rect.filename in active_gifs
                rect_in.put_nowait(rect)
                #print "putting into loader",rect.filename
                i += 1
            else:
                packed_rects = packed_rects[i:]
                break
        else:
            packed_rects = []
        
        # receive loaded animations and add them to rendering pipeline
        if not loading_gif and not gif_out.empty():
            loading_gif = gif_out.get_nowait()
            #print "getting from loader",loading_gif.filename
            loading_func = loading_gif.ready()
        # continue loading gifs
        if loading_gif:
            complete = False
            try:
                while loading_func.next():
                    j += 1
                    pass
            except StopIteration:
                complete = True
            if complete:
                j = 0
                assert not loading_gif.filename in active_gifs, loading_gif.filename
                active_gifs[loading_gif.filename] = loading_gif
                active_sprites.add(loading_gif)
                loading_gif = None
                loading_func = None
                complete = False
        
        ## Render
        screen.blit(background, (0,0))
        
        active_sprites.draw(screen)
        pygame.display.update()
        active_sprites.update(active_sprites, dt, completed_gifs)
        
        ## Framerate
        dt = clock.tick(30)
        ## IO
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    pygame.quit()
                    sys.exit()
                '''if event.key == pygame.K_SPACE:
                    import code
                    code.interact(local=locals())'''
                    
        # DEBUG:
        if not packer.is_alive():
            return
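The loop above never blocks on its queues: it checks full() before put_nowait() and empty() before get_nowait(), and defers whatever does not fit to the next frame. A condensed sketch of that handoff, detached from pygame (payloads are placeholders):

from multiprocessing import Queue
try:
    from queue import Empty          # Python 3
except ImportError:
    from Queue import Empty          # Python 2

work_in = Queue(4)
pending = ['a.gif', 'b.gif', 'c.gif']

# producer side: push as much as fits, keep the rest for the next frame
sent = 0
for name in pending:
    if work_in.full():
        break
    work_in.put_nowait(name)
    sent += 1
pending = pending[sent:]

# consumer side: drain without blocking; full()/empty() are only hints,
# so a race with another process is caught via Empty
while not work_in.empty():
    try:
        item = work_in.get_nowait()
    except Empty:
        break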