Example #1
def benchmark_iterator(components: List[str], max_builds: int) -> Iterator:
    for component, category in showfast_iterator(components=components):
        curr_metric, curr_release = None, None
        queue = LifoQueue(maxsize=max_builds)

        for benchmark in get_benchmarks(component, category):
            if not benchmark['hidden']:
                release = parse_release(benchmark['build'])

                if curr_metric != benchmark['metric']:
                    curr_metric, curr_release = benchmark['metric'], release
                    queue.queue.clear()

                if release != curr_release:
                    curr_release = release
                    queue.queue.clear()

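                # the first max_builds benchmarks per (metric, release) fill
                # the buffer; anything beyond that retention window is yielded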
                if queue.full():
                    yield benchmark
                else:
                    queue.put(benchmark)
class IOULoader:
    def __init__(self,
                 store_tracker,
                 detectionLoader,
                 Frames_lib,
                 tracker,
                 tracker_id,
                 tracker_cnt,
                 feature_model,
                 queueSize=1024):
        self.detectionLoader = detectionLoader
        self.stopped = False
        self.Q = LifoQueue(maxsize=queueSize)
        self.Frames_Lib = Frames_lib
        self.store_tracker = store_tracker
        self.feature_model = feature_model
        self.cam_number = 1

    def start(self):
        # start a thread to read frames from the file video stream

        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        while True:
            (orig_img, frame_id, bbox, bbox_score, kp, kp_score,
             imgs) = self.detectionLoader.read()
            (Frames_Lib, tracker, tracker_id,
             tracker_cnt) = self.store_tracker.read()
            #print('iou tracker_id')
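            # drop stale detections: clearing under the queue's mutex keeps
            # only the freshest frame (the same pattern recurs in the loaders below)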
            with self.detectionLoader.Q.mutex:
                self.detectionLoader.Q.queue.clear()
            with torch.no_grad():
                if bbox is None or len(bbox) == 0:
                    if self.Q.full():
                        time.sleep(2)
                    self.Q.put(
                        (orig_img, frame_id, self.cam_number, Frames_Lib, [[]],
                         [[]], [[]], tracker, tracker_id, tracker_cnt))
                    continue
                rois, ids, features, imgs, score_cam = load_data(self.cam_number, frame_id, bbox, bbox_score, imgs,\
                    self.feature_model)  # fetch ROI info for the current frame
                # single-camera IoU tracking update
                ID_list_cam = Hungarian_match(
                    rois, self.cam_number, tracker,
                    tracker_id)  # match IoU via the Hungarian algorithm to get the ID list; uncertain entries are -1
                if self.Q.full():
                    time.sleep(2)
                self.Q.put((orig_img,frame_id,self.cam_number, Frames_Lib, ID_list_cam, rois, features,\
                        tracker,tracker_id, tracker_cnt))

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # return queue len
        return self.Q.qsize()
class DetectionLoader:
    def __init__(self, dataloder, batchSize=1, queueSize=1024):
        self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
        self.det_model.load_weights('models/yolo/yolov3-spp.weights')
        self.det_model.net_info['height'] = opt.inp_dim
        self.det_inp_dim = int(self.det_model.net_info['height'])
        assert self.det_inp_dim % 32 == 0
        assert self.det_inp_dim > 32
        self.det_model.cuda()
        self.det_model.eval()
        self.stopped = False
        self.dataloder = dataloder
        self.batchSize = batchSize
        # initialize the queue used to store frames read from
        # the video file
        self.Q = LifoQueue(maxsize=queueSize)
        pose_dataset = Mscoco()
        if opt.fast_inference:
            self.pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
        else:
            self.pose_model = InferenNet(4 * 1 + 1, pose_dataset)
        self.pose_model.cuda()
        self.pose_model.eval()

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        while True:
            (img, orig_img, im_name, im_dim_list) = self.dataloder.getitem()

            with self.dataloder.Q.mutex:
                self.dataloder.Q.queue.clear()
            with torch.no_grad():
                # Human Detection
                #img = img.cuda()
                img = img.cuda()
                prediction = self.det_model(img, CUDA=True)
                # im_dim_list = im_dim_list.cuda()
                frame_id = int(im_name.split('.')[0])
                # NMS process
                dets = dynamic_write_results(prediction,
                                             opt.confidence,
                                             opt.num_classes,
                                             nms=True,
                                             nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    if self.Q.full():
                        time.sleep(2)
                    self.Q.put(
                        (orig_img, frame_id, None, None, None, None, None))
                    continue

                dets = dets.cpu()
                im_dim_list = torch.index_select(im_dim_list, 0,
                                                 dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list,
                                           1)[0].view(-1, 1)

                # coordinate transfer
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor *
                                    im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor *
                                    im_dim_list[:, 1].view(-1, 1)) / 2

                dets[:, 1:5] /= scaling_factor
                for j in range(dets.shape[0]):
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0,
                                                  im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0,
                                                  im_dim_list[j, 1])

                boxes = dets[:, 1:5]
                scores = dets[:, 5:6]
                # Pose Estimation
                inp = im_to_torch(orig_img)
                inps = torch.zeros(boxes.size(0), 3, opt.inputResH,
                                   opt.inputResW)
                pt1 = torch.zeros(boxes.size(0), 2)
                pt2 = torch.zeros(boxes.size(0), 2)
                inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)
                inps = Variable(inps.cuda())
                hm = self.pose_model(inps)
                if boxes is None:
                    if self.Q.full():
                        time.sleep(2)
                    self.Q.put(
                        (orig_img, frame_id, None, None, None, None, None))
                    continue
                else:
                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm.cpu(), pt1, pt2, opt.inputResH, opt.inputResW,
                        opt.outputResH, opt.outputResW)
                    bbox, b_score, kp, kp_score, roi = pose_nms(
                        orig_img, boxes, scores, preds_img, preds_scores)
                    # result = {
                    #     'imgname': im_name,
                    #     'result': result,
                    #     'orig_img' : orig_img
                    # }

                if self.Q.full():
                    time.sleep(2)
                #self.Q.put((orig_img[k], im_name[k], boxes_k, scores[dets[:,0]==k], inps, pt1, pt2))
                #self.Q.put((result, orig_img, im_name))
                self.Q.put(
                    (orig_img, frame_id, bbox, b_score, kp, kp_score, roi))

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # return queue len
        return self.Q.qsize()
Example #4
File: q_vid.py  Project: Tubbz-alt/AAMS
class VideoStream:
    def __init__(self, queueSize=128):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        global capfps
        global capwid 
        global caphei
        if (args.w):
            cap = cv.VideoCapture(0)
            capfps = cap.get(cv.CAP_PROP_FPS)
        else:
            cap = cv.VideoCapture("rtsp://*****:*****@192.168.1.64/Streaming/Channels/3")
            capfps = 30
        capwid = round(cap.get(cv.CAP_PROP_FRAME_WIDTH))
        caphei = round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
        self.stream = cap
        self.stopped = False
 
        # initialize the queue used to store frames read from
        # the video file
        self.Q = LifoQueue(maxsize=queueSize)

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                return
 
            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                # read the next frame from the file
                (grabbed, frame) = self.stream.read()
 
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.stop()
                    return
 
                # add the frame to the queue
                self.Q.put(frame)

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def more(self):
        # return True if there are still frames in the queue
        return self.Q.qsize() > 0

    def clearQ(self):
        with self.Q.mutex:
            self.Q.queue.clear()
        return self.Q.empty()

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
class DetectionLoader:
    def __init__(self, dataloder, batchSize=1, queueSize=1024):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.det_model = Darknet("/home/a/roborts_project/src/alpha_pose/src/yolo/cfg/yolov3-spp.cfg")
        self.det_model.load_weights('/home/a/roborts_project/src/alpha_pose/src/models/yolo/yolov3-spp.weights')
        self.det_model.net_info['height'] = opt.inp_dim
        self.det_inp_dim = int(self.det_model.net_info['height'])
        assert self.det_inp_dim % 32 == 0
        assert self.det_inp_dim > 32
        self.det_model.cpu()
        self.det_model.eval()

        self.stopped = False
        self.dataloder = dataloder
        self.batchSize = batchSize
        
        # initialize the queue used to store frames read from
        # the video file
        self.Q = LifoQueue(maxsize=queueSize)

    def start(self):
        p = Thread(target=self.update, args=())
        p.daemon = True
        p.start()
        return self

    def update(self):
        # keep looping the whole dataset
        while True:
            img, orig_img, im_name, im_dim_list = self.dataloder.getitem()
            with self.dataloder.Q.mutex:
                self.dataloder.Q.queue.clear()

            with torch.no_grad():
                # Human Detection
                img = img.cpu()
                prediction = self.det_model(img, CUDA=False)
                # NMS process
                dets = dynamic_write_results(prediction, opt.confidence,
                                    opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    for k in range(len(orig_img)):
                        if self.Q.full():
                            time.sleep(2)
                        self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
                    continue
                dets = dets.cpu()
                im_dim_list = torch.index_select(im_dim_list,0, dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)

                # coordinate transfer
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2

                
                dets[:, 1:5] /= scaling_factor
                for j in range(dets.shape[0]):
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
                boxes = dets[:, 1:5]
                scores = dets[:, 5:6]

            for k in range(len(orig_img)):
                boxes_k = boxes[dets[:,0]==k]
                if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
                    if self.Q.full():
                        time.sleep(2)
                    self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
                    continue
                inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH, opt.inputResW)
                pt1 = torch.zeros(boxes_k.size(0), 2)
                pt2 = torch.zeros(boxes_k.size(0), 2)
                if self.Q.full():
                    time.sleep(2)
                self.Q.put((orig_img[k], im_name[k], boxes_k, scores[dets[:,0]==k], inps, pt1, pt2))

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # return queue len
        return self.Q.qsize()
Example #6
class WebcamLoader:
    def __init__(self, webcam, batchSize=1, queueSize=256):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        # self.stream = cv2.VideoCapture('http://ivi.bupt.edu.cn/hls/cctv5phd.m3u8')
        # extension: also support reading from a video-stream URL
        self.webcam = webcam
        print(f'{get_time_now()} start capture webcam: {webcam}')
        if isinstance(webcam, str):
            self.stream = cv2.VideoCapture(webcam)
        elif isinstance(webcam, int):
            self.stream = cv2.VideoCapture(int(webcam))
        else:
            self.stream = None
        self.stopped = False
        if not self.stream.isOpened():
            print(f'Cannot capture source {self.webcam}')
            self.stopped = True
        print(f'{get_time_now()} capture webcam {webcam} success')
        # initialize the queue used to store frames read from
        # the video file
        self.batchSize = batchSize
        self.Q = LifoQueue(maxsize=queueSize)

    def reLoadStream(self):
        print(f'{get_time_now()} start capture webcam: {self.webcam}')
        if isinstance(self.webcam, str):
            self.stream = cv2.VideoCapture(self.webcam)
        elif isinstance(self.webcam, int):
            self.stream = cv2.VideoCapture(int(self.webcam))
        else:
            self.stream = None
        if not self.stream.isOpened():
            print(f'{get_time_now()} Cannot capture source {self.webcam}')
            self.stopped = True
        print(f'{get_time_now()} capture webcam {self.webcam} success')

    def start(self):
        if self.stopped:
            return
        # start a thread to read frames from the file video stream
        t = threading.Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        print(f'WebcamLoader_update_thread: {threading.currentThread().name}')
        # keep looping infinitely
        i = 0
        is_disconnet = False
        while not self.stopped:
            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                img = []
                orig_img = []
                im_name = []
                im_dim_list = []
                for k in range(self.batchSize):
                    (grabbed, frame) = self.stream.read()
                    # if the `grabbed` boolean is `False`, then we have
                    # reached the end of the video file or we disconnect

                    if not grabbed:
                        print(f'{get_time_now()} read frame,grabbed:{grabbed}')
                        is_disconnet = True
                        break
                    inp_dim = int(opt.inp_dim)
                    img_k, orig_img_k, im_dim_list_k = prep_frame(
                        frame, inp_dim)

                    img.append(img_k)
                    orig_img.append(orig_img_k)
                    im_name.append(str(i) + '.jpg')
                    im_dim_list.append(im_dim_list_k)

                if is_disconnet:
                    break

                with torch.no_grad():
                    # Human Detection
                    img = torch.cat(img)
                    im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)

                    self.Q.put((img, orig_img, im_name, im_dim_list))
                    i = i + 1
            else:
                with self.Q.mutex:
                    self.Q.queue.clear()
        # reconnect after the video stream disconnects
        if is_disconnet:
            self.reLoadStream()
            self.update()

    def videoinfo(self):
        # indicate the video info
        fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
        fps = self.stream.get(cv2.CAP_PROP_FPS)
        frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),
                     int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        return (fourcc, fps, frameSize)

    def getitem(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # return queue size
        return self.Q.qsize()

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
# demonstrate stack implementation
# using queue module

from queue import LifoQueue

# Initializing a stack
stack = LifoQueue(maxsize=3)

# qsize() show the number of elements
# in the stack
print(stack.qsize())

# put() function to push
# element in the stack
stack.put('a')
stack.put('b')
stack.put('c')

print("Full: ", stack.full())
print("Size: ", stack.qsize())

# get() function to pop
# element from stack in
# LIFO order
print('\nElements popped from the stack')
print(stack.get())
print(stack.get())
print(stack.get())

print("\nEmpty: ", stack.empty())
"""
The queue module also provides a LIFO queue, which is essentially a stack. Data is inserted with the put() function and get() takes data out of the queue.
The following functions and attributes are available in this module:

maxsize – Number of items allowed in the queue.
empty() – Return True if the queue is empty, False otherwise.
full() – Return True if there are maxsize items in the queue. If the queue was initialized with maxsize=0 (the default), then full() never returns True.
get() – Remove and return an item from the queue. If the queue is empty, wait until an item is available.
get_nowait() – Return an item if one is immediately available, else raise queue.Empty.
put(item) – Put an item into the queue. If the queue is full, wait until a free slot is available before adding the item.
put_nowait(item) – Put an item into the queue without blocking. If no free slot is immediately available, raise queue.Full.
qsize() – Return the number of items in the queue.

"""

from queue import LifoQueue

stack = LifoQueue()
stack.put("a")
stack.put("b")
stack.put("c")
stack.put("d")

print("the size of the stack :", stack.qsize())
print("boolean is full or not :", stack.full())
print("to check whether empty or not :", stack.empty())

print("the elements in the stack are LIFO :", end=" ")
for i in range(stack.qsize()):
    print(stack.get(), end=" ")
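
The non-blocking variants listed above (put_nowait(), get_nowait()) are not
exercised by the snippet, so here is a minimal stdlib-only sketch of how they
raise queue.Full and queue.Empty instead of blocking:

from queue import LifoQueue, Full, Empty

stack = LifoQueue(maxsize=1)
stack.put_nowait('only')          # fits: the stack has one free slot

try:
    stack.put_nowait('overflow')  # the stack is full, so this raises queue.Full
except Full:
    print('stack is full')

print(stack.get_nowait())         # pops 'only' immediately

try:
    stack.get_nowait()            # the stack is now empty, so this raises queue.Empty
except Empty:
    print('stack is empty')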
Example #9
print(stack.pop())
print(stack.pop())

print("\n stack after elements are popped out: ", stack)

### implementation using queue module

from queue import LifoQueue

#initializing a stack
stack = LifoQueue(maxsize=3)  # maxsize- number of items allowed in the queue
print(stack.qsize())  # qsize()- gives the current number of items in the stack

#put()- function to push
stack.put("black")
stack.put("blue")
stack.put("white")

print("full: ", stack.full()
      )  #full() – Return True if there are maxsize items in the queue.
# If the queue was initialized with maxsize=0 (the default), then full() never returns True.
print("size: ", stack.qsize())
# using get() function to pop out the element
print("\nelements popped from the stack: ")
print(stack.get())
print(stack.get())
print(stack.get())

print("\nempty: ", stack.empty()
      )  # empty() – Return True if the queue is empty (False otherwise).
Example #10
# FIFO - First In First Out
from queue import Queue

q1 = Queue(maxsize=8)
q1.put("Ranjith")
q1.put("Srinika")
q1.put("Madhuri")
# q1.get(block=False)

print(q1.get())
print(q1.get())
print(q1.get())
print(q1.full())
print(q1.empty())

# LIFO - Last In First Out
from queue import LifoQueue

q2 = LifoQueue(maxsize=8)
q2.put("Ranjith")
q2.put("Srinika")
q2.put("Madhuri")

print(q2.get())
print(q2.get())
print(q2.get())
print(q2.full())
print(q2.empty())


Example #11
# Using List
print("STACK USING LIST")
stack = []
n = int(input("enter n"))
for i in range(n):
    v = int(input("enter data"))
    stack.append(v)
print("Stack=", stack[::-1])
print("Top of stack:", stack.pop(), '\n')

# Using Collections Module
print("STACK USING COLLECTIONS.DEQUE")
from collections import deque
stack = deque()
n = int(input("enter n"))
for i in range(n):
    v = int(input("enter data"))
    stack.append(v)
print(stack)
print("Top of Stack:", stack.pop(), '\n')

# using Queue Module
print("STACK USING QUEUE MODULE")
from queue import LifoQueue
n = int(input("Enter no. of elements of stack"))
stack = LifoQueue(n)
print("Initial Size Of Stack: ", stack.qsize())
for i in range(n):
    v = int(input("Enter data"))
    stack.put(v)

print("Is Stack Full: ", stack.full())
print("Current Size of Stack: ", stack.qsize())
print("Top Of Stack:", stack.get())
print('\n\n\n')
print(dir(LifoQueue))
Example #12
class Dispatcher:
    """Dispatcher which schedule workers and communicate with the database.

    The dispatcher uses two queues: a queue containing containing the workers
    which should be launched and a queue containing the workers which are being
    processed. The latter queue has a limited size defined by ``n_workers``.
    Note that these workers can run simultaneously.

    Parameters
    ----------
    config : dict or str
        A configuration YAML file containing the inforation about the database.
    event_config : dict or str
        A RAMP configuration YAML file with information regarding the worker
        and the ramp event.
    worker : Worker, default=CondaEnvWorker
        The type of worker to launch. By default, we launch local worker which
        uses ``conda``.
    n_workers : int, default=1
        Maximum number of workers which can run submissions simultaneously.
    hunger_policy : {None, 'sleep', 'exit'}
        Policy to apply in case that there is no anymore workers to be
        processed:

        * if None: the dispatcher will work without interruption;
        * if 'sleep': the dispatcher will sleep for 5 seconds before to check
          for new submission;
        * if 'exit': the dispatcher will stop after collecting the results of
          the last submissions.
    """
    def __init__(self,
                 config,
                 event_config,
                 worker=None,
                 n_worker=1,
                 hunger_policy=None):
        self.worker = CondaEnvWorker if worker is None else worker
        self.n_worker = (max(multiprocessing.cpu_count() + 1 +
                             n_worker, 1) if n_worker < 0 else n_worker)
        self.hunger_policy = hunger_policy
        # init the poison pill to kill the dispatcher
        self._poison_pill = False
        # create the different dispatcher queues
        self._awaiting_worker_queue = Queue()
        self._processing_worker_queue = LifoQueue(maxsize=self.n_worker)
        self._processed_submission_queue = Queue()
        # split the different configuration required
        if (isinstance(config, str) and isinstance(event_config, str)):
            self._database_config = read_config(config,
                                                filter_section='sqlalchemy')
            self._ramp_config = generate_ramp_config(event_config, config)
        else:
            self._database_config = config['sqlalchemy']
            self._ramp_config = event_config['ramp']
        self._worker_config = generate_worker_config(event_config, config)

    def fetch_from_db(self, session):
        """Fetch the submission from the database and create the workers."""
        submissions = get_submissions(session,
                                      self._ramp_config['event_name'],
                                      state='new')
        if not submissions:
            logger.info('No new submissions fetched from the database')
            return
        for submission_id, submission_name, _ in submissions:
            # do not train the sandbox submission
            submission = get_submission_by_id(session, submission_id)
            if not submission.is_not_sandbox:
                continue
            # create the worker
            worker = self.worker(self._worker_config, submission_name)
            set_submission_state(session, submission_id, 'sent_to_training')
            self._awaiting_worker_queue.put_nowait(
                (worker, (submission_id, submission_name)))
            logger.info('Submission {} added to the queue of submissions to '
                        'be processed'.format(submission_name))

    def launch_workers(self, session):
        """Launch the awaiting workers if possible."""
        while (not self._processing_worker_queue.full()
               and not self._awaiting_worker_queue.empty()):
            worker, (submission_id, submission_name) = \
                self._awaiting_worker_queue.get()
            logger.info('Starting worker: {}'.format(worker))
            worker.setup()
            worker.launch_submission()
            set_submission_state(session, submission_id, 'training')
            self._processing_worker_queue.put_nowait(
                (worker, (submission_id, submission_name)))
            logger.info(
                'Store the worker {} into the processing queue'.format(worker))
        if self._processing_worker_queue.full():
            logger.info('The processing queue is full. Waiting for a worker to'
                        ' finish')

    def collect_result(self, session):
        """Collect result from processed workers."""
        try:
            workers, submissions = zip(*[
                self._processing_worker_queue.get()
                for _ in range(self._processing_worker_queue.qsize())
            ])
        except ValueError:
            logger.info('No workers are currently waiting or processed.')
            if self.hunger_policy == 'sleep':
                time.sleep(5)
            elif self.hunger_policy == 'exit':
                self._poison_pill = True
            return
        for worker, (submission_id,
                     submission_name) in zip(workers, submissions):
            if worker.status == 'running':
                self._processing_worker_queue.put_nowait(
                    (worker, (submission_id, submission_name)))
                logger.info('Worker {} is still running'.format(worker))
                time.sleep(0)
            else:
                logger.info('Collecting results from worker {}'.format(worker))
                returncode, stderr = worker.collect_results()
                set_submission_state(
                    session, submission_id,
                    'tested' if not returncode else 'training_error')
                set_submission_error_msg(session, submission_id, stderr)
                self._processed_submission_queue.put_nowait(
                    (submission_id, submission_name))
                worker.teardown()

    def update_database_results(self, session):
        """Update the database with the results of ramp_test_submission."""
        while not self._processed_submission_queue.empty():
            submission_id, submission_name = \
                self._processed_submission_queue.get_nowait()
            if 'error' in get_submission_state(session, submission_id):
                update_leaderboards(session, self._ramp_config['event_name'])
                update_all_user_leaderboards(session,
                                             self._ramp_config['event_name'])
                logger.info('Skip update for {} due to failure during the '
                            'processing'.format(submission_name))
                continue
            logger.info('Update the results obtained on each fold for '
                        '{}'.format(submission_name))
            path_predictions = os.path.join(
                self._worker_config['predictions_dir'], submission_name)
            set_predictions(session, submission_id, path_predictions)
            set_time(session, submission_id, path_predictions)
            set_scores(session, submission_id, path_predictions)
            set_bagged_scores(session, submission_id, path_predictions)
            set_submission_state(session, submission_id, 'scored')
            update_leaderboards(session, self._ramp_config['event_name'])
            update_all_user_leaderboards(session,
                                         self._ramp_config['event_name'])

    def launch(self):
        """Launch the dispatcher."""
        logger.info('Starting the RAMP dispatcher')
        with session_scope(self._database_config) as session:
            logger.info('Open a session to the database')
            try:
                while not self._poison_pill:
                    self.fetch_from_db(session)
                    self.launch_workers(session)
                    self.collect_result(session)
                    self.update_database_results(session)
            finally:
                # reset the submissions to 'new' in case of error or unfinished
                # training
                submissions = get_submissions(session,
                                              self._ramp_config['event_name'],
                                              state=None)
                for submission_id, _, _ in submissions:
                    submission_state = get_submission_state(
                        session, submission_id)
                    if submission_state == 'training':
                        set_submission_state(session, submission_id, 'new')
            logger.info('Dispatcher killed by the poison pill')
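
The docstring above describes the dispatcher's core mechanic: an unbounded
queue of awaiting workers feeding a bounded LifoQueue of processing slots. A
minimal, self-contained sketch of that pattern (with a hypothetical Worker
stand-in for CondaEnvWorker and none of the RAMP database calls) could look
like this:

from queue import LifoQueue, Queue


class Worker:
    """Hypothetical stand-in for CondaEnvWorker: launches work, reports status."""

    def __init__(self, name):
        self.name = name
        self.status = 'idle'

    def launch(self):
        # a real worker would start an asynchronous job here
        self.status = 'finished'


awaiting = Queue()                 # unbounded: everything fetched from the DB
processing = LifoQueue(maxsize=2)  # bounded by n_worker: the running slots

for name in ('sub-1', 'sub-2', 'sub-3'):
    awaiting.put(Worker(name))

# launch_workers(): fill the processing slots while room remains
while not processing.full() and not awaiting.empty():
    worker = awaiting.get()
    worker.launch()
    processing.put_nowait(worker)

print(processing.qsize(), 'running,', awaiting.qsize(), 'still waiting')

# collect_result(): drain the processing queue, re-queuing unfinished workers
for _ in range(processing.qsize()):
    worker = processing.get()
    if worker.status == 'running':
        processing.put_nowait(worker)  # not done yet, keep it in a slot
    else:
        print('collected', worker.name)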
Example #13
from queue import Queue, LifoQueue, PriorityQueue
from collections import deque

# first-in, first-out queue (FIFO)
q = Queue(maxsize=5)

# last-in, first-out queue (a LIFO stack)
lq = LifoQueue(maxsize=6)

# priority queue
pq = PriorityQueue(maxsize=5)

for i in range(5):
    q.put(i)
    lq.put(i)
    pq.put(i)

#q.put(i for i in range(5))

print("先进先出队列:{}   是否为空:{} 多大:{}  是否满:{}".format(q.queue,q.empty(),q.qsize(),q.full()))
print("后进先出队列:{}   是否为空:{} 多大:{}  是否满:{}".format(lq.queue,lq.empty(),lq.qsize(),lq.full()))
print("优先级队列:{}   是否为空:{} 多大:{}  是否满:{}".format(pq.queue,pq.empty(),pq.qsize(),pq.full()))

print(q.get(),lq.get(),pq.get())

print("先进先出队列:{}   是否为空:{} 多大:{}  是否满:{}".format(q.queue,q.empty(),q.qsize(),q.full()))
print("后进先出队列:{}   是否为空:{} 多大:{}  是否满:{}".format(lq.queue,lq.empty(),lq.qsize(),lq.full()))
print("优先级队列:{}   是否为空:{} 多大:{}  是否满:{}".format(pq.queue,pq.empty(),pq.qsize(),pq.full()))

# double-ended queue (deque)
dq = deque([1,2,3])
dq.append(4)
print(dq,'\n')
print(dq.pop())
print(dq,'\n')
print(dq.popleft())
Example #14
from collections import deque

stackStore = deque()
stackStore.append("Python")
stackStore.append('C++')
stackStore.append('Java')
print(stackStore)

stackStore.pop()
stackStore.pop()
print(stackStore)

#stack using LifoQueue
from queue import LifoQueue

stackMag = LifoQueue(maxsize=8)
print(stackMag.qsize())

stackMag.put('1')
stackMag.put('3')
stackMag.put('5')
stackMag.put('7')
stackMag.put('9')

print("Is the stack full?: ", stackMag.full())
print("The size of the stack is: ", stackMag.qsize())
print(stackMag.get())
print(stackMag.get())
print(stackMag.get())
print(stackMag.get())
print('\nIs the stack empty?: ', stackMag.empty())
class PoseLoader:
    def __init__(self, detectionLoader, single_height, queueSize=1024):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.detectionLoader = detectionLoader
        self.stopped = False
        self.batchSize = opt.posebatch
        self.single_height = single_height
        # initialize the queue used to store data
        self.Q = LifoQueue(maxsize=queueSize)
        # Load pose model
        pose_dataset = Mscoco()
        if opt.fast_inference:
            self.pose_model = InferenNet_fast(4 * 1 + 1, pose_dataset)
        else:
            self.pose_model = InferenNet(4 * 1 + 1, pose_dataset)
        self.pose_model.cuda()
        self.pose_model.eval()

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping the whole dataset
        while True:
            print('read det')
            (orig_img, im_name, boxes, scores, inps, pt1,
             pt2) = self.detectionLoader.read()
            print('read det over ')
            with self.detectionLoader.Q.mutex:
                self.detectionLoader.Q.queue.clear()
            with torch.no_grad():

                if boxes is None or boxes.nelement() == 0:
                    #writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
                    # self.Q.put((orig_img,None,None,None,None,None))
                    if self.Q.full():
                        time.sleep(2)
                    self.Q.put((orig_img, None, None, None, None, None))
                    continue

                datalen = inps.size(0)  # batch size of the input data
                leftover = 0
                if (datalen) % self.batchSize:
                    leftover = 1
                num_batches = datalen // self.batchSize + leftover
                hm = []
                for j in range(num_batches):
                    inps_j = inps[j * self.batchSize:min(
                        (j + 1) * self.batchSize, datalen)].cuda()
                    hm_j = self.pose_model(inps_j)
                    hm.append(hm_j)
                hm = torch.cat(hm)
                hm = hm.cpu().data

                preds_hm, preds_img, preds_scores = getPrediction(
                    hm, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH,
                    opt.outputResW)
                # results filtered by pose estimation: bbox, bbox_score, roi, keypoints, kp_score (for both camera views)
                box,box_s,roi,kp,kp_s = pose_nms(boxes.cpu(), scores.cpu(), preds_img.cpu(), preds_scores.cpu()\
                    ,self.single_height,orig_img)
                if self.Q.full():
                    time.sleep(2)
                self.Q.put((orig_img, box, box_s, roi, kp, kp_s))
                # if boxes is None or boxes.nelement() == 0:
                #     while self.Q.full():
                #         time.sleep(0.2)
                #     self.Q.put((None, orig_img, im_name, boxes, scores, None, None))
                #     continue
                # inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
                # inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)

                # while self.Q.full():
                #     time.sleep(0.2)
                # self.Q.put((inps, orig_img, im_name, boxes, scores, pt1, pt2))

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # return queue len
        return self.Q.qsize()
Example #16
    def image_get_thread(self, q: LifoQueue):
        # poll the camera URL forever; when the queue is full, pop the top
        # entry to make room so the freshest response can always be queued
        while True:
            img_resp = requests.get(url=self.photo_url)
            if q.full():
                q.get()
            q.put(img_resp)
class WebcamLoader:
    def __init__(self, webcam, queueSize=256):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.stream = cv2.VideoCapture(webcam)
        # self.stream.set(cv2.CAP_PROP_FRAME_WIDTH,384)
        # self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT,218)
        assert self.stream.isOpened(), 'Cannot capture source'
        self.stopped = False
        # initialize the queue used to store frames read from
        # the video file
        self.batchSize = 1
        self.Q = LifoQueue(maxsize=queueSize)

    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely
        i = 0
        while True:
            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                (grabbed, frame) = self.stream.read()
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file; check before resizing,
                # since frame is None in that case
                if not grabbed:
                    self.stop()
                    return
                frame = cv2.resize(
                    frame,
                    (int(frame.shape[1] * 0.2), int(frame.shape[0] * 0.2)),
                    interpolation=cv2.INTER_AREA)
                inp_dim = int(opt.inp_dim)
                img, orig_img, dim = prep_frame(frame, inp_dim)

                im_name = str(i) + '.jpg'

                with torch.no_grad():
                    # Human Detection

                    im_dim_list = torch.FloatTensor([dim]).repeat(1, 2)
                    self.Q.put((img, orig_img, im_name, im_dim_list))
                    i = i + 1
            else:
                with self.Q.mutex:
                    self.Q.queue.clear()

    def videoinfo(self):
        # indicate the video info
        fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
        fps = self.stream.get(cv2.CAP_PROP_FPS)
        frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),
                     int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        return (fourcc, fps, frameSize)

    def getitem(self):
        # return next frame in the queue
        return self.Q.get()

    def len(self):
        # return queue size
        return self.Q.qsize()

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
Example #18
File: camera.py  Project: zardosht/isar
class CameraService(Service):
    """
    Starts the OpenCV capturing and puts the frames in order into the LIFO queue
    """
    def __init__(self, service_name=None, cam_id=0):
        super().__init__(service_name)

        _queue_size = 20
        self._queue = LifoQueue(_queue_size)

        # self._queue_size = 20
        # self._queue = Queue(self._queue_size)

        self.cam_id = cam_id
        self._capture = None
        self._open_capture()
        self._stop_event = threading.Event()
        self._do_capture = False

    def _open_capture(self):
        if isar.PLATFORM == "Windows":
            self._capture = cv2.VideoCapture(self.cam_id, cv2.CAP_DSHOW)
            width = 1920
            height = 1080
            self._capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
            self._capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

        elif isar.PLATFORM == "Linux":
            self._capture = cv2.VideoCapture(self.cam_id, cv2.CAP_V4L2)
            self._capture.set(cv2.CAP_PROP_FOURCC,
                              cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
            width = 1920
            height = 1080
            self._capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
            self._capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

        else:  # Darwin
            self._capture = cv2.VideoCapture(self.cam_id)

            # TODO: possibly for later
            # width = 1920
            # height = 1080
            # self._capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
            # self._capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
            # self.capture.set(cv2.CAP_PROP_FPS, 24)

        if not self._capture.isOpened():
            message = "Could not open camera {}".format(self.cam_id)
            raise Exception(message)

    def start(self):
        t = threading.Thread(name="CameraThread", target=self._start_capture)
        t.daemon = True
        t.start()

    def _start_capture(self):
        """
        Read OpenCV frames and put them in the queue.
        Frames are skipped while the queue is full.
        """
        if not self._capture.isOpened():
            self._open_capture()

        frame_number = -1
        while not self._stop_event.is_set():
            if not self._do_capture:
                continue

            if self._queue.full():
                # logger.warning("Camera _queue is full! continue.")
                continue

            # time.sleep(0.05)

            ret, frame = self._capture.read()
            if ret:
                frame_number += 1
                camera_frame = CameraFrame(frame, frame_number)
                self._queue.put(camera_frame)
            else:
                logger.error("Capture was unsuccessful.")

    def stop(self):
        """
        Stop capturing
        :return:
        """
        self._stop_event.set()

        for i in range(100):
            try:
                self._queue.get_nowait()
            except Exception:
                pass

        for i in range(100):
            try:
                self._queue.put_nowait(isar.POISON_PILL)
            except Exception:
                pass

        # TODO: this hangs on stop! why? I don't know
        # self._capture.release()

    def release_capture(self):
        self._capture.release()

    def start_capture(self):
        self._do_capture = True

    def stop_capture(self):
        self._do_capture = False

    def get_frame(self, flipped_x=False, flipped_y=False):
        """
        Return the latest frame from the LIFO queue.
        Returns None if the queue is empty.
        :return:
        """
        if not self._do_capture:
            raise RuntimeError(
                "_do_capture is False. Have you forgotten to call start_capture() first?"
            )

        if self._queue.empty():
            # logger.warning("Camera _queue is empty! Return None.")
            return None

        camera_frame = self._queue.get()
        if flipped_x:
            camera_frame.flip(0)

        if flipped_y:
            camera_frame.flip(1)

        return camera_frame

    def get_camera_capture_size(self):
        if self._capture:
            width = int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH))  # float
            height = int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
            return width, height
        else:
            return None
Example #19
# A stack is a last-in, first-out structure (think of a stack of papers,
# where you can only access the top-most paper). Traditionally, operations on
# stacks are named push and pop, but since python uses the same API as for
# FIFO queues, we use put() and get() still; they'll both operate off the
# top of the stack.

from queue import LifoQueue

stack = LifoQueue(maxsize=5)

stack.put('one')
stack.put('two')
stack.put('three')
stack.put('four')
stack.put('five')
# stack.put('six', timeout=5)  # raise Full, queue.Full
print('full: ', stack.full())

stack.get()
stack.get()
stack.get()
stack.get()
last = stack.get()
# q.get(block=False)  # raise Empty, queue.Empty
print('last: ', last)
print('empty: ', stack.empty())

# full:  True
# last:  one
# empty:  True
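
For comparison with the put()/get() naming discussed in the comment at the top
of this example, here is the same stack built on a plain list, where the
traditional pop name actually exists (a minimal sketch, not part of the
original example):

stack = []
for item in ('one', 'two', 'three', 'four', 'five'):
    stack.append(item)  # append() plays the role of push

last = None
while stack:
    last = stack.pop()  # pop() removes from the top of the stack

print('last: ', last)   # last:  one -- the same order as the LifoQueue demo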

Example #20
'''
print('Elements popped from stack:')
print(stack.pop())
print(stack.pop())
print(stack.pop())

print('Stack after elements are popped:')
print(stack)
'''

# Stack design using LifoQueue

from queue import LifoQueue

lifostack = LifoQueue(maxsize=5)

print(lifostack.qsize())

lifostack.put('sugar')
lifostack.put('lemon drops')
lifostack.put('water')

print("Full: ", lifostack.full())
print("Size: ", lifostack.qsize())

print('Elements popped from the stack')
print(lifostack.get())
print(lifostack.get())
print(lifostack.get())

print("Empty: ", lifostack.empty())
Example #21
class WebcamDetectionLoader:
    def __init__(self, webcam=0, batchSize=1, queueSize=256):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
        self.det_model.load_weights('models/yolo/yolov3-spp.weights')
        self.det_model.net_info['height'] = opt.inp_dim
        self.det_inp_dim = int(self.det_model.net_info['height'])
        assert self.det_inp_dim % 32 == 0
        assert self.det_inp_dim > 32
        self.det_model.cuda()
        self.det_model.eval()

        self.stream = cv2.VideoCapture(int(webcam))
        assert self.stream.isOpened(), 'Cannot open webcam'
        self.stopped = False
        self.batchSize = batchSize

        # initialize the queue used to store frames read from
        # the video file
        self.Q = LifoQueue(maxsize=queueSize)

    def len(self):
        return self.Q.qsize()

    def start(self):
        # start a thread to read frames from the file video stream
        t = threading.Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        print(
            f'WebcamDetectionLoader_update_thread: {threading.currentThread().name}'
        )
        # keep looping
        while True:
            img = []
            inp = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(self.batchSize):
                (grabbed, frame) = self.stream.read()
                # bail out before touching frame: it is None when grabbing fails
                if not grabbed:
                    continue
                h, w, c = frame.shape
                # frame = cv2.resize(frame, (int(w / 4), int(h / 4)), interpolation=cv2.INTER_CUBIC)

                # process and add the frame to the queue
                inp_dim = int(opt.inp_dim)
                img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
                inp_k = im_to_torch(orig_img_k)

                img.append(img_k)
                inp.append(inp_k)
                orig_img.append(orig_img_k)
                im_dim_list.append(im_dim_list_k)

            with torch.no_grad():
                ht = inp[0].size(1)
                wd = inp[0].size(2)
                # Human Detection
                img = Variable(torch.cat(img)).cuda()
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                im_dim_list = im_dim_list.cuda()

                prediction = self.det_model(img, CUDA=True)
                # NMS process
                dets = dynamic_write_results(prediction,
                                             opt.confidence,
                                             opt.num_classes,
                                             nms=True,
                                             nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    for k in range(len(inp)):
                        if self.Q.full():
                            with self.Q.mutex:
                                self.Q.queue.clear()
                        self.Q.put((inp[k], orig_img[k], None, None))
                    continue

                im_dim_list = torch.index_select(im_dim_list, 0,
                                                 dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list,
                                           1)[0].view(-1, 1)

                # coordinate transfer
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor *
                                    im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor *
                                    im_dim_list[:, 1].view(-1, 1)) / 2

                dets[:, 1:5] /= scaling_factor
                for j in range(dets.shape[0]):
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0,
                                                  im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0,
                                                  im_dim_list[j, 1])
                boxes = dets[:, 1:5].cpu()
                scores = dets[:, 5:6].cpu()

            for k in range(len(inp)):
                if self.Q.full():
                    with self.Q.mutex:
                        self.Q.queue.clear()
                self.Q.put((inp[k], orig_img[k], boxes[dets[:, 0] == k],
                            scores[dets[:, 0] == k]))

    def videoinfo(self):
        # indicate the video info
        fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
        fps = self.stream.get(cv2.CAP_PROP_FPS)
        frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),
                     int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        return (fourcc, fps, frameSize)

    def read(self):
        # return next frame in the queue
        return self.Q.get()

    def more(self):
        # return True if there are still frames in the queue
        return self.Q.qsize() > 0

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
Example #22
"""
from queue import Queue, LifoQueue, PriorityQueue

# first-in, first-out queue
q = Queue(maxsize=5)
# last-in, first-out queue
lq = LifoQueue(maxsize=6)
# priority queue
pq = PriorityQueue(maxsize=5)

for i in range(5):
    q.put(i)
    lq.put(i)
    pq.put(i)

print("先进先出队列:%s;是否为空:%s;多大,%s;是否满,%s" %
      (q.queue, q.empty(), q.qsize(), q.full()))
print("后进先出队列:%s;是否为空:%s;多大,%s;是否满,%s" %
      (lq.queue, lq.empty(), lq.qsize(), lq.full()))
print("优先级队列:%s;是否为空:%s,多大,%s;是否满,%s" %
      (pq.queue, pq.empty(), pq.qsize(), pq.full()))

print(q.get(), lq.get(), pq.get())

print("先进先出队列:%s;是否为空:%s;多大,%s;是否满,%s" %
      (q.queue, q.empty(), q.qsize(), q.full()))
print("后进先出队列:%s;是否为空:%s;多大,%s;是否满,%s" %
      (lq.queue, lq.empty(), lq.qsize(), lq.full()))
print("优先级队列:%s;是否为空:%s,多大,%s;是否满,%s" %
      (pq.queue, pq.empty(), pq.qsize(), pq.full()))
from queue import LifoQueue

stack = LifoQueue(maxsize=3)
print(f'"type(stack)" is {type(stack)}')
'''
Output:
"type(stack)" is <class 'queue.LifoQueue'>
'''

# add elements to stack using put() method
stack.put('A')
stack.put('B')
stack.put('C')
print(f'qsize is {stack.qsize()}')
'''
Output:
qsize is 3
'''

print(stack.full())
'''
Output:
True
'''

print(stack.empty())
'''
Output:
False
'''

print(stack.queue)
'''
Output:
['A', 'B', 'C']
'''
Example #24
from queue import LifoQueue

n = int(input())
stack = LifoQueue(maxsize=n)
print("Initial size", stack.qsize())
for i in range(n):
    stack.put(input())
print("Queue size is", stack.qsize())
if stack.full():
    print("Queue is full")
print(stack.get())
print(stack.get())
print(stack.get())
Example #25
class ImageLoader:
    def __init__(self, im_names, img_sorce, batchSize=1, format='yolo', queueSize=256):
        self.img_sorce = img_sorce
        self.img_dir = opt.inputpath
        self.imglist = im_names
        # self.transform = transforms.Compose([
        #     transforms.ToTensor(),
        #     transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        # ])
        self.format = format

        self.batchSize = batchSize
        # self.datalen = len(self.imglist) # 1
        # leftover = 0
        # if (self.datalen) % batchSize: # 1 % 1 = 0
        #     leftover = 1
        # self.num_batches = self.datalen // batchSize + leftover # 1 // 1 + 0 = 1

        # initialize the queue used to store data
        self.Q = LifoQueue(maxsize=queueSize)

    def start(self):
        p = Thread(target=self.getitem_yolo, args=()) 
        p.daemon = True
        p.start()
        return self

    def getitem_yolo(self): # main loop thread: the node file's callback just keeps feeding in new images
        i = 0
        while True:
            if not self.Q.full():
                img = []
                orig_img = []
                im_name = []
                im_dim_list = []
                for k in range(self.batchSize): # for k in range(0, 1):
                    ### fetch the latest image from the subscribed source
                    frame = self.img_sorce.getImage()
                    print ("process new frame...")

                    inp_dim = int(opt.inp_dim)
                    img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
                
                    img.append(img_k)
                    orig_img.append(orig_img_k)
                    im_name.append(str(i)+'.jpg')
                    im_dim_list.append(im_dim_list_k)

                with torch.no_grad():
                    # Human Detection
                    img = torch.cat(img)
                    im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
                    im_dim_list_ = im_dim_list
                    
                    self.Q.put((img, orig_img, im_name, im_dim_list))
                    i += 1
            else:
                with self.Q.mutex:
                    self.Q.queue.clear()


    def getitem(self):
        return self.Q.get()

    def length(self):
        return len(self.imglist)

    def len(self):
        return self.Q.qsize()
Example #26
class PictureGrid(QWidget):
    def __init__(self, parent, uids, scrollbar, picture_viewer, meta_viewer):
        super().__init__(parent)
        self.scrollbar = scrollbar
        self.picture_viewer = picture_viewer
        self.meta_viewer = meta_viewer
        self.uids = uids
        self.images = {}
        self.selected_index = -1

        self.setFocusPolicy(QtCore.Qt.StrongFocus)

        scrollbar.valueChanged.connect(lambda: self.update())

        self.image_queue = LifoQueue(20)

        def load_images():
            while True:
                uid = self.image_queue.get(block = True)
                image = QImage(str(picture_file_for_uid(uid))).scaled(
                    IMG_SIZE, IMG_SIZE, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
                self.images[uid] = image
                self.update()

        image_loading_thread = Thread(target = load_images)
        image_loading_thread.daemon = True
        image_loading_thread.start()

    def get_image(self, uid):
        if uid in self.images:
            image = self.images[uid]
            if image: return image
            return IMG_LOADING

        if not self.image_queue.full():
            self.images[uid] = None
            self.image_queue.put(uid, block = True)
        return IMG_LOADING

    def get_selected_uid(self):
        if 0 <= self.selected_index < len(self.uids):
            return self.uids[self.selected_index]
        else: return None

    def paintEvent(self, event):
        super().paintEvent(event)

        height = max(self.height() // IMG_SIZE, 1)
        width = max(self.width() // IMG_SIZE, 1)

        self.scrollbar.setMaximum(len(self.uids) // width)
        self.scrollbar.setPageStep(1)

        painter = QtGui.QPainter(self)
        for i in range(0, width):
            for j in range(0, height):
                x = i * IMG_SIZE
                y = j * IMG_SIZE
                index = i + (j + self.scrollbar.value()) * width
                if index < len(self.uids):
                    if index == self.selected_index:
                        painter.fillRect(x, y, IMG_SIZE, IMG_SIZE, QColor.fromRgb(0xCCE8FF))

                    image = self.get_image(self.uids[index])
                    painter.drawImage(x + IMG_SIZE / 2 - image.width() / 2, y + IMG_SIZE / 2 - image.height() / 2, image)

                    if index == self.selected_index:
                        painter.setPen(QColor.fromRgb(0x99D1FF))
                        painter.drawRect(x, y, IMG_SIZE - 1, IMG_SIZE - 1)

        painter.end()

    def mousePressEvent(self, press_event):
        height = max(self.height() // IMG_SIZE, 1)
        width = max(self.width() // IMG_SIZE, 1)

        x = press_event.x() // IMG_SIZE
        y = press_event.y() // IMG_SIZE

        if x < width and y < height:
            self.selected_index = x + (y + self.scrollbar.value()) * width
            if self.selected_index >= len(self.uids):
                self.selected_index = -1
            self.update()

            uid = self.get_selected_uid()
            if uid:
                self.picture_viewer.set_image(uid)
                self.picture_viewer.update()
                self.meta_viewer.set_meta(get_meta(uid))

    def wheelEvent(self, wheel_event):
        self.scrollbar.wheelEvent(wheel_event)
        self.update()