Example #1
0
async def main():
    """Run batched async detection over `img_names`, collect results, and
    write annotated images to ./output/.

    Relies on module-level globals: model, img_names, img_folder, colors,
    plot_one_box, async_inference_detector, concurrent.
    """
    t0 = time.time()

    # Pool of CUDA streams; its size bounds how many images are
    # inferred concurrently at any one time.
    pool_size = 3
    stream_pool = asyncio.Queue()
    for _ in range(pool_size):
        stream_pool.put_nowait(torch.cuda.Stream(device='cuda:0'))

    # single process
    detections = []
    for name in tqdm(img_names):
        frame = mmcv.imread(os.path.join(img_folder, name))

        # Borrow a stream from the pool for the duration of inference.
        async with concurrent(stream_pool):
            result = await async_inference_detector(model, frame)

        # Flatten per-class detections into submission records.
        # Category ids are 1-based, hence idx + 1.
        detections.extend(
            {
                'name': name,
                'category': idx + 1,
                'bbox': det[:4].tolist(),
                'score': str(det[4])
            }
            for idx, boxes in enumerate(result)
            if boxes is not None
            for det in boxes
        )

        # save the visualization results to image files
        ##### model.show_result(frame, result, out_file=name)
        for idx, boxes in enumerate(result):
            if boxes is None:
                continue
            for det in boxes:
                caption = '%s %.2f' % (model.CLASSES[int(idx)], det[4])
                plot_one_box(det[:4],
                             frame,
                             label=caption,
                             color=colors[idx],
                             line_thickness=3)
        cv2.imwrite('output/' + name, frame)

    # with open('result.json', 'w') as fp:
    #     json.dump(list(detections), fp, indent=4, ensure_ascii=False)

    print('time use: %.3fs' % (time.time() - t0))
 async def apredict(self, img):
     """Asynchronously detect objects in a single image.

     Args:
         img: an image array, or a path string (loaded with mmcv.imread).

     Returns:
         The detection result from async_inference_detector.
     """
     if isinstance(img, str):
         img = mmcv.imread(img)
     # Hold a CUDA stream from the pool only for the inference call.
     async with concurrent(self.streamqueue):
         return await async_inference_detector(self.model, img)
Example #3
0
 async def detect(img):
     """Run async detection on `img`, gated by the shared CUDA stream pool."""
     async with concurrent(streamqueue):
         result = await async_inference_detector(model, img)
     return result