Example #1
    def detect_one(self, **kwargs):
        '''
        Inference on a single image.

        Args:
            img_path: str, path to the image file; or
            pil_img: PIL.Image, an already loaded image

            input_size: int, input resolution
            conf_thres: float, confidence threshold

            return_img: bool, if True, return an image with bbox visualization. \
                default: False
            visualize: bool, if True, show the image with bbox visualization via plt.show(). \
                default: False
        '''
        assert 'img_path' in kwargs or 'pil_img' in kwargs
        img = kwargs.get('pil_img', None) or Image.open(kwargs['img_path'])

        detections = self._predict_pil(img, **kwargs)

        if kwargs.get('return_img', False):
            np_img = np.array(img)
            visualization.draw_dt_on_np(np_img, detections, **kwargs)
            return np_img
        if kwargs.get('visualize', False):
            np_img = np.array(img)
            visualization.draw_dt_on_np(np_img, detections, **kwargs)
            plt.figure(figsize=(10, 10))
            plt.imshow(np_img)
            plt.show()
        return detections
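For reference, a minimal usage sketch of detect_one based on the docstring above and the call in Example #2; the Detector class name and weights path are assumptions, only the keyword arguments come from the source.

from PIL import Image

detector = Detector(weights_path='best.ckpt')  # hypothetical constructor
# pass either img_path or pil_img; visualize=True draws the boxes and calls plt.show()
detections = detector.detect_one(pil_img=Image.open('street.jpg'),
                                 input_size=800,
                                 conf_thres=0.3,
                                 return_img=False,
                                 visualize=True)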
Example #2
def camera_thread(data, stop):
	print("(", data.cam_ip, ")[---] Starting camera thread")
	while stop():
		# We have to reconnect to the camera on every iteration, because otherwise
		# the video stream gets closed due to too little activity
		print("(", data.cam_ip, ")[1/3] Capturing frame...")
		capture = cv2.VideoCapture("http://" + data.cam_ip + ":81/stream", cv2.CAP_FFMPEG)
		
		success, frame = capture.read()
		capture.release()

		if not success:
			print("(", data.cam_ip, ")[---] Frame capture unsuccessful!")
			data.outgoing += b'\x03'
			break

		if os.path.exists(get_tmp_filename(data.cam_ip)):
			os.remove(get_tmp_filename(data.cam_ip))

		cv2.imwrite(get_tmp_filename(data.cam_ip), frame)
		print("(", data.cam_ip, ")[2/3] Saved. Processing...")
		dts = detector.detect_one(img_path = get_tmp_filename(data.cam_ip), input_size=800, conf_thres=0.3, visualize=False, return_img=False)
		
		np_image = numpy.array(Image.open(get_tmp_filename(data.cam_ip)))
		visualization.draw_dt_on_np(np_image, dts, show_angle=True, show_count=True, text_size=0.30)
		im = Image.fromarray(np_image)
		im.save("." + data.cam_ip + ".debug.png")

		people_amount = len(dts)
		print("(", data.cam_ip, ")[3/3] Done:", people_amount)
		data.outgoing += b'\x04'
		data.outgoing += bytes([people_amount])
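The stop argument is called as a predicate on every loop iteration, so the caller is expected to pass a callable that returns True while the loop should keep running. A minimal launch sketch under that assumption (the CamData class is hypothetical; cam_ip and outgoing are the only fields actually used above):

import threading

class CamData:  # hypothetical container for the fields used in camera_thread
	def __init__(self, cam_ip):
		self.cam_ip = cam_ip
		self.outgoing = b''

keep_running = True
data = CamData('192.168.0.42')
thread = threading.Thread(target=camera_thread, args=(data, lambda: keep_running))
thread.start()
# ... later: setting keep_running = False ends the loop on its next iteration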
Example #3
    def _predict_pil(self, pil_img, **kwargs):
        '''
        Args:
            pil_img: PIL.Image.Image
            input_size: int, input resolution
            conf_thres: float, confidence threshold
        '''
        input_size = kwargs.get('input_size', self.input_size)
        conf_thres = kwargs.get('conf_thres', self.conf_thres)
        assert isinstance(pil_img, Image.Image), 'input must be a PIL.Image'
        assert input_size is not None, 'Please specify the input resolution'
        assert conf_thres is not None, 'Please specify the confidence threshold'

        # pad to square
        input_img, _, pad_info = utils.rect_to_square(pil_img, None,
                                                      input_size, 0)

        input_ori = tvf.to_tensor(input_img)
        input_ = input_ori.unsqueeze(0)

        assert input_.dim() == 4
        device = next(self.model.parameters()).device
        input_ = input_.to(device=device)
        with torch.no_grad():
            dts = self.model(input_).cpu()

        dts = dts.squeeze()
        # post-processing
        dts = dts[dts[:, 5] >= conf_thres]
        if len(dts) > 1000:
            _, idx = torch.topk(dts[:, 5], k=1000)
            dts = dts[idx, :]
        if kwargs.get('debug', False):
            np_img = np.array(input_img)
            visualization.draw_dt_on_np(np_img, dts)
            plt.imshow(np_img)
            plt.show()
        dts = utils.nms(dts,
                        is_degree=True,
                        nms_thres=0.45,
                        img_size=input_size)
        dts = utils.detection2original(dts, pad_info.squeeze())
        if kwargs.get('debug', False):
            np_img = np.array(pil_img)
            visualization.draw_dt_on_np(np_img, dts)
            plt.imshow(np_img)
            plt.show()
        return dts
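The post-processing above first drops rows whose confidence (column 5) falls below conf_thres and then keeps at most the 1000 highest-scoring rows before NMS. A self-contained illustration of that filtering step on a dummy tensor (the shape and values are made up for the example):

import torch

dts = torch.rand(5000, 7)                   # dummy detections; column 5 holds the confidence
conf_thres = 0.3

dts = dts[dts[:, 5] >= conf_thres]          # confidence threshold
if len(dts) > 1000:
    _, idx = torch.topk(dts[:, 5], k=1000)  # keep only the 1000 most confident rows
    dts = dts[idx, :]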
Example #4
                                    num_workers=num_cpu,
                                    pin_memory=True,
                                    drop_last=False)
            dataiterator = iter(dataloader)

        # save checkpoint
        if iter_i > 0 and (iter_i % args.checkpoint_interval == 0):
            state_dict = {
                'iter': iter_i,
                'model': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }
            save_path = os.path.join('./weights',
                                     f'{job_name}_{today}_{iter_i}.ckpt')
            torch.save(state_dict, save_path)

        # save detection
        if iter_i > 0 and iter_i % args.img_interval == 0:
            for img_path in eval_img_paths:
                eval_img = Image.open(img_path)
                dts = api.detect_once(model,
                                      eval_img,
                                      conf_thres=0.1,
                                      input_size=target_size)
                np_img = np.array(eval_img)
                visualization.draw_dt_on_np(np_img, dts)
                np_img = cv2.resize(np_img, (416, 416))
                # cv2.imwrite(f'./results/eval_imgs/{job_name}_{today}_{iter_i}.jpg', np_img)
                logger.add_image(img_path, np_img, iter_i, dataformats='HWC')

            model.train()
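The checkpoint saved above stores the iteration counter together with the model and optimizer state dicts. A minimal resume sketch under those assumptions (the file name is hypothetical; model and optimizer are assumed to be constructed exactly as during training):

import torch

checkpoint = torch.load('./weights/example_job_2021-01-01_10000.ckpt')  # hypothetical path
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
start_iter = checkpoint['iter'] + 1  # continue counting from the saved iteration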