def car_detect(Iorig):
    """Detect cars and buses in a BGR image and return their crops.

    Args:
        Iorig: image (numpy array, BGR as produced by cv2.imread) — it is
            passed straight to darknet's ``detect``, so it must be in
            whatever form that binding accepts.  # NOTE(review): confirm
            whether this binding wants an array or a path.

    Returns:
        (Lcars, cars_crop): list of Label boxes (normalized [0, 1]
        coordinates) and the matching list of cropped car images.
        Both lists are empty when nothing is detected (the original
        returned None implicitly in that case, crashing tuple-unpacking
        callers).
    """
    R = detect(vehicle_net, vehicle_meta, Iorig, thresh=0.5)

    # Darknet bindings return class labels either as bytes or as str
    # depending on the version; the original compared against the mixed
    # list [b'car', 'bus'], so one of the two classes could never match.
    R = [r for r in R if r[0] in (b'car', b'bus', 'car', 'bus')]

    Lcars = []
    cars_crop = []
    if R:
        # (width, height) of the image, for normalizing box coordinates.
        WH = np.array(Iorig.shape[1::-1], dtype=float)
        for r in R:
            # r[2] is (cx, cy, w, h) in pixels; normalize to [0, 1].
            cx, cy, w, h = (old_div(np.array(r[2]),
                                    np.concatenate((WH, WH)))).tolist()
            tl = np.array([cx - w / 2., cy - h / 2.])
            br = np.array([cx + w / 2., cy + h / 2.])
            label = Label(0, tl, br)
            Icar = crop_region(Iorig, label)
            # PNG round-trip normalizes the crop to a uint8 BGR array.
            # Kept for compatibility: "crop1.png" is left on disk, as the
            # original code did.
            cv2.imwrite("crop1.png", Icar)
            cars_crop.append(cv2.imread("crop1.png"))
            Lcars.append(label)
    return Lcars, cars_crop
# Example #2
            print('\t\t%d cars found' % len(R))

            if len(R):

                Iorig = cv2.imread(img_path)
                WH = np.array(Iorig.shape[1::-1], dtype=float)
                Lcars = []

                for i, r in enumerate(R):

                    cx, cy, w, h = (np.array(r[2]) / np.concatenate(
                        (WH, WH))).tolist()
                    tl = np.array([cx - w / 2., cy - h / 2.])
                    br = np.array([cx + w / 2., cy + h / 2.])
                    label = Label(0, tl, br)
                    Icar = crop_region(Iorig, label)

                    Lcars.append(label)

                    cv2.imwrite('%s/%s_%dcar.png' % (output_dir, bname, i),
                                Icar)

                lwrite('%s/%s_cars.txt' % (output_dir, bname), Lcars)

    except:
        traceback.print_exc()
        sys.exit(1)

    sys.exit(0)
# Example #3
	def on_any_event(event):
		"""Watchdog callback: run vehicle/person detection on newly created files.

		On a 'created' event the file at event.src_path is fed to the darknet
		detector once; car/bus detections are cropped to output_dir and person
		detections (padded by 3% and clamped to the image) to
		output_dir_people.  'modified' events are only logged.

		Note: the original ran the same network twice (once for R, once for
		P) and used mixed tab/space indentation in the person-clamping code —
		a TabError hazard under Python 3.  Both are fixed here.
		"""
		if event.is_directory:
			return None

		elif event.event_type == 'created':
			# Take any action here when a file is first created.
			print("Received created event - %s." % event.src_path)
			try:
				print('\tScanning %s' % event.src_path)
				img_path = event.src_path

				print('\tScanning %s' % img_path)

				bname = basename(splitext(img_path)[0])

				# Run the detector ONCE and split the results; the original
				# issued two identical detect() calls, doubling inference cost.
				dets, _ = detect(vehicle_net, vehicle_meta, img_path.encode('utf-8'), thresh=vehicle_threshold)
				R = [r for r in dets if r[0] in ['car', 'bus']]
				P = [p for p in dets if p[0] in ['person']]

				print('\t\t%d cars found' % len(R))
				print('\t\t%d persons found' % len(P))

				if len(R):
					Iorig = cv2.imread(img_path)
					# (width, height) for normalizing box coordinates.
					WH = np.array(Iorig.shape[1::-1], dtype=float)
					Lcars = []

					for i, r in enumerate(R):
						# r[2] is (cx, cy, w, h) in pixels; normalize to [0, 1].
						cx, cy, w, h = (np.array(r[2]) / np.concatenate((WH, WH))).tolist()
						tl = np.array([cx - w / 2., cy - h / 2.])
						br = np.array([cx + w / 2., cy + h / 2.])
						label = Label(0, tl, br)
						Icar = crop_region(Iorig, label)

						Lcars.append(label)

						cv2.imwrite('%s/%s_%dcar.png' % (output_dir, bname, i), Icar)

					# lwrite('%s/%s_cars.txt' % (output_dir, bname), Lcars)

				if len(P):
					Iorig = cv2.imread(img_path)
					WH = np.array(Iorig.shape[1::-1], dtype=float)
					Lpeople = []

					for i, r in enumerate(P):
						cx, cy, w, h = (np.array(r[2]) / np.concatenate((WH, WH))).tolist()
						print(cx,cy,w,h)
						# Pad the person box by 3% on each side so the crop
						# keeps some context, clamping to the [0, 1] image.
						tl = np.array([cx - w / 2., cy - h / 2.])
						tl = [max(tl[0] - .03, 0.00), max(tl[1] - .03, 0.00)]
						br = np.array([cx + w / 2., cy + h / 2.])
						br = [min(br[0] + .03, 1.00), min(br[1] + .03, 1.00)]
						label = Label(0, tl, br)
						Iperson = crop_region(Iorig, label)
						print(tl,br)
						Lpeople.append(label)
						height,width,_ = Iperson.shape
						print(height,width)
						# Skip tiny crops: a face below ~320 px wide is too
						# small for downstream recognition.
						if width>320:
							cv2.imwrite('%s/%s_%dperson.png' % (output_dir_people, bname, i), Iperson)
						else:
							print('face likely too small to recognize')
							print('%s/%s_%dperson.png' % (output_dir_people, bname, i))

					# lwrite('%s/%s_cars.txt' % (output_dir, bname), Lcars)

			except:
				# Single handler replaces the original's two nested
				# identical bare excepts: dump the traceback and bail.
				traceback.print_exc()
				sys.exit(1)

		elif event.event_type == 'modified':
			# Taken any action here when a file is modified.
			print("Received modified event - %s." % event.src_path)
# Example #4
            frame_rgb = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
            dn.copy_image_from_bytes(darknet_image, frame_rgb.tobytes())
            # im = nparray_to_image(arr)
            R = detect_image(vehicle_net, vehicle_meta, darknet_image, thresh=vehicle_threshold)
            R = [r for r in R if r[0].decode('utf-8') in ['car', 'bus', 'truck']]
            if len(R):
                WH = np.array(arr.shape[1::-1], dtype=float)
                Lcars = []
                for i, r in enumerate(R):

                    cx, cy, w, h = (np.array(r[2]) / np.concatenate((WH, WH))).tolist()
                    tl = np.array([cx - w / 2., cy - h / 2.])
                    br = np.array([cx + w / 2., cy + h / 2.])
                    label = Label(0, tl, br)
                    Lcars.append(label)
                    Icar = crop_region(arr, label)
                    # print('Searching for license plates using WPOD-NET')
                    ratio = float(max(Icar.shape[:2])) / min(Icar.shape[:2])
                    side = int(ratio * 288.)
                    bound_dim = min(side + (side % (2 ** 4)), 608)
                    # print("\t\tBound dim: %d, ratio: %f" % (bound_dim, ratio))
                    Llp, LlpImgs, _ = detect_lp(wpod_net, Icar / 255, bound_dim, 2 ** 4, (240, 80),
                                                0.5)
                    if len(LlpImgs):
                        Ilp = LlpImgs[0]
                        res, confidence = ocrmodel.recognizeOneframe(Ilp * 255.)

                        pts = Llp[0].pts * label.wh().reshape(2, 1) + label.tl().reshape(2, 1)
                        ptspx = pts * np.array(arr.shape[1::-1], dtype=float).reshape(2, 1)
                        draw_losangle(arr, ptspx, RED, 3)
                        if confidence > 0.5:
# Example #5
            if len(R):
                Iorig = frame  # 原始帧图片
                WH = np.array(Iorig.shape[1::-1], dtype=float)
                Lcars = []

                for i, r in enumerate(R):

                    cx, cy, w, h = (np.array(r[2]) / np.concatenate(
                        (WH, WH))).tolist()
                    tl = np.array([cx - w / 2., cy - h / 2.])
                    br = np.array([cx + w / 2., cy + h / 2.])
                    label = Label(0, tl, br)
                    print(label.wh())
                    print(label.tl())
                    Icar = crop_region(Iorig,
                                       label)  # 截取车辆区域 Icar 车辆 label 车辆坐标信息
                    Icar = Icar.astype('uint8')

                    Lcars.append(label)
                    draw_label(Iorig, label, color=YELLOW, thickness=3)

                    # lp detector
                    print('Searching for license plates using WPOD-NET')

                    ratio = float(max(Icar.shape[:2])) / min(Icar.shape[:2])
                    side = int(ratio * 288.)
                    bound_dim = min(side + (side % (2**4)), 608)
                    print("\t\tBound dim: %d, ratio: %f" % (bound_dim, ratio))

                    Llp, LlpImgs, elapse = detect_lp(wpod_net, im2single(Icar),
                                                     bound_dim, 2**4,
# Example #6
            R, _ = detect(lp_net,
                          lp_meta,
                          img_path.encode('utf-8'),
                          thresh=lp_threshold)
            #R = [r for r in R if r[0] in ['lp']]

            if len(R):
                Iorig = cv2.imread(img_path)
                WH = np.array(Iorig.shape[1::-1], dtype=float)
                Llp = []

                for i, r in enumerate(R):
                    cx, cy, w, h = (np.array(r[2]) / np.concatenate(
                        (WH, WH))).tolist()
                    tl = np.array([cx - w / 2., cy - h / 2.])
                    br = np.array([cx + w / 2., cy + h / 2.])
                    label = Label(0, tl, br)
                    Ilp = crop_region(Iorig, label)

                    Llp.append(label)
                    cv2.imwrite('%s/%s_lp.png' % (output_dir, bname), Ilp)

                lwrite('%s/%s_lp.txt' % (output_dir, bname), Llp)

    except:
        traceback.print_exc()
        sys.exit(1)

    sys.exit(0)
    def detect(self, img, bname, pixel_threshold=100000):
        """Detect cars/buses in a BGR image with this object's cv2.dnn model.

        Args:
            img: BGR image (numpy array) as read by cv2.imread.
            bname: base name of the source image; only referenced by the
                commented-out debug dump below.
            pixel_threshold: minimum bounding-box area in pixels; smaller
                detections are discarded.

        Returns:
            (cars_img, Lcars): cropped car images and the matching Label
            boxes with coordinates normalized to [0, 1].  Both lists are
            empty when nothing passes the filters.
        """
        output_dir = "output/tmp"  # only used by the commented-out debug dump
        boxes = []
        classes_id = []
        confidences = []
        scale = 0.00392  # ~1/255: normalize 8-bit pixels to [0, 1]

        blob = cv2.dnn.blobFromImage(img,
                                     scalefactor=scale,
                                     size=(416, 416),
                                     mean=(0, 0, 0),
                                     swapRB=True,
                                     crop=False)
        height, width = img.shape[:2]

        # take image to model
        self.model.setInput(blob)

        # run forward
        outputs = self.model.forward(utils.get_output_layers(self.model))

        for out in outputs:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = float(scores[class_id])

                if confidence > self.threshold:
                    # detection[:4] is (cx, cy, w, h) normalized to [0, 1];
                    # convert to pixel-space top-left (x, y) + width/height.
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)

                    boxes.append([x, y, w, h])
                    classes_id.append(class_id)
                    confidences.append(confidence)

        indices = cv2.dnn.NMSBoxes(boxes,
                                   confidences,
                                   score_threshold=self.threshold,
                                   nms_threshold=0.3)
        # NMSBoxes returns an empty tuple or an (N, 1) array; flatten it
        # once and index directly instead of the original O(n^2)
        # `for i in range(len(boxes)): if i in indices` scan.
        kept = np.array(indices).flatten().astype(int) if len(indices) else []

        R = [(str(self.labels[classes_id[i]]), confidences[i], tuple(boxes[i]))
             for i in kept]
        R = sorted(R, key=lambda det: -det[1])  # most confident first
        R = [r for r in R if r[0] in ['car', 'bus']]

        cars_img = []
        Lcars = []

        if len(R):
            Iorig = img
            # (width, height) for normalizing box coordinates.
            WH = np.array(Iorig.shape[1::-1], dtype=float)

            for r in R:
                _, _, ori_w, ori_h = r[2]

                # Drop detections smaller than pixel_threshold pixels.
                if ori_w * ori_h >= pixel_threshold:
                    # Unlike the darknet snippets above, r[2] here stores the
                    # TOP-LEFT corner (x, y), not the center — the original's
                    # `cx, cy` names were misleading.
                    x, y, w, h = (np.array(r[2]) / np.concatenate(
                        (WH, WH))).tolist()
                    tl = np.array([x, y])
                    br = np.array([x + w, y + h])
                    label = Label(0, tl, br)
                    Icar = crop_region(Iorig, label)

                    Lcars.append(label)

                    # cv2.imwrite('{}/{}_{}car.png'.format(output_dir, bname, i), Icar)
                    cars_img.append(Icar)

            # lwrite('{}/{}_cars.txt'.format(output_dir, bname), Lcars)

        print('\t\t{} cars found by using detect_vehicle'.format(
            len(cars_img)))

        return cars_img, Lcars