def detect_classify_modified(self):
        """
        detect and classify
        """
        # print(self.imgs_path)
        for tracklet in self.imgs_path:
            tracklet_camera_path = [
                os.path.join(tracklet, x) for x in os.listdir(tracklet)
            ]

            for tracklet_camera in tracklet_camera_path:
                the_imgs_path = [
                    os.path.join(tracklet_camera, x)
                    for x in os.listdir(tracklet_camera) if x.endswith('.jpg')
                ]
                # print(the_imgs_path)

                for the_img in the_imgs_path:
                    # print(the_img)
                    # read image data
                    img = cv2.imread(the_img)
                    img = cv2.copyMakeBorder(img,
                                             BORDER,
                                             BORDER,
                                             BORDER,
                                             BORDER,
                                             cv2.BORDER_CONSTANT,
                                             value=(100, 100, 100))
                    img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # imread gives BGR; PIL expects RGB

                    img2det = process_img(img, self.inp_dim)
                    img2det = img2det.to(device)  # put image data to device

                    # vehicle detection
                    prediction = self.detector.forward(img2det, CUDA=True)

                    # calculating scaling factor
                    orig_img_size = list(img.size)
                    output = self.process_predict(prediction, self.prob_th,
                                                  self.num_classes,
                                                  self.nms_th, self.inp_dim,
                                                  orig_img_size)

                    orig_img = cv2.cvtColor(np.asarray(img),
                                            cv2.COLOR_RGB2BGR)  # RGB => BGR

                    print(the_img)
                    try:
                        if type(output) != int:
                            self.cls_draw_bbox(output, orig_img)
                            print('\n', os.path.split(the_img)[0])
                            dst_path = self.dst_dir + '/' + os.path.split(
                                the_img)[0] + '/' + os.path.split(the_img)[1]
                            print(dst_path)
                            # the nested output directory may not exist yet; create it so imwrite does not silently fail
                            os.makedirs(os.path.dirname(dst_path), exist_ok=True)
                            if not os.path.exists(dst_path):
                                cv2.imwrite(dst_path, orig_img)
                    except Exception as inst:
                        img.show()
                        print(inst)
                        exit(2)
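
process_img is not included in any of these snippets; the sketch below is only a guess at the letterbox preprocessing it presumably performs (the helper name process_img_sketch and the gray padding value are assumptions, not taken from the code above):

import cv2
import numpy as np
import torch


def process_img_sketch(pil_img, inp_dim):
    """Letterbox-resize a PIL image to (inp_dim, inp_dim) and return a
    normalized 1x3xHxW float tensor, as Darknet-style detectors expect."""
    img = np.asarray(pil_img)                       # RGB, HWC, uint8
    h, w = img.shape[:2]
    scale = min(inp_dim / w, inp_dim / h)           # keep the aspect ratio
    new_w, new_h = int(w * scale), int(h * scale)
    resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_CUBIC)

    # pad the short side with neutral gray so the input becomes square
    canvas = np.full((inp_dim, inp_dim, 3), 128, dtype=np.uint8)
    top, left = (inp_dim - new_h) // 2, (inp_dim - new_w) // 2
    canvas[top:top + new_h, left:left + new_w] = resized

    tensor = torch.from_numpy(canvas.transpose(2, 0, 1)).float() / 255.0
    return tensor.unsqueeze(0)                      # add the batch dimension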
Example #2
    def detect_classify(self):
        """
        detect and classify
        """
        for x in self.imgs_path:
            # read image data
            img = Image.open(x)
            img2det = process_img(img, self.inp_dim)
            img2det = img2det.to(device)  # put image data to device

            # vehicle detection
            prediction = self.detector.forward(img2det, CUDA=True)

            # calculating scaling factor
            orig_img_size = list(img.size)
            output = self.process_predict(prediction,
                                          self.prob_th,
                                          self.num_classes,
                                          self.nms_th,
                                          self.inp_dim,
                                          orig_img_size)

            orig_img = cv2.cvtColor(np.asarray(
                img), cv2.COLOR_RGB2BGR)  # RGB => BGR
            if type(output) != int:
                self.cls_draw_bbox(output, orig_img)
                dst_path = self.dst_dir + '/' + os.path.split(x)[1]
                if not os.path.exists(dst_path):
                    cv2.imwrite(dst_path, orig_img)
    def predict(self):
        """
        Batch detection and recognition; write the results to dst_dir
        """
        for x in self.imgs_path:
            # read image data
            img = Image.open(x)
            img2det = process_img(img, self.inp_dim)
            img2det = img2det.to(device)  # put image data to device

            # vehicle detection
            prediction = self.Net.forward(img2det, CUDA=True)

            # calculate scaling factor
            orig_img_size = list(img.size)
            output = process_predict(prediction, self.prob_th,
                                     self.num_classes, self.nms_th,
                                     self.inp_dim, orig_img_size)

            orig_img = cv2.cvtColor(np.asarray(img),
                                    cv2.COLOR_RGB2BGR)  # RGB => BGR
            if type(output) != int:
                # draw the detection bboxes on the original image
                # draw_car_bbox(output, orig_img)
                self.cls_draw_bbox(output, orig_img)
                # self.cls_and_draw(output, img)
                dst_path = self.dst_dir + '/' + os.path.split(x)[1]
                if not os.path.exists(dst_path):
                    cv2.imwrite(dst_path, orig_img)
Example #4
    def detect_classify(self):
        """
        detect and classify
        """
        for x in self.imgs_path:
            # read image data
            img = Image.open(x)
            img2det = process_img(img, self.inp_dim)
            img2det = img2det.to(device)  # put image data to device

            # vehicle detection
            prediction = self.detector.forward(img2det, CUDA=True)

            # calculating scaling factor
            orig_img_size = list(img.size)
            output = self.process_predict(prediction,
                                          self.prob_th,
                                          self.num_classes,
                                          self.nms_th,
                                          self.inp_dim,
                                          orig_img_size)

            #orig_img = cv2.cvtColor(np.asarray(
                #img), cv2.COLOR_RGB2BGR)  # RGB => BGR
            #if type(output) != int:
                #self.cls_draw_bbox(output, orig_img)
                #dst_path = self.dst_dir + '/' + os.path.split(x)[1]
                #if not os.path.exists(dst_path):
                    #cv2.imwrite(dst_path, orig_img)

            # skip images with no detection (process_predict returns an int)
            if type(output) == int:
                continue

            # [left-up(x), left-up(y), right-down(x), right-down(y)]
            # use the first detection: .item() on a column slice requires exactly one box
            x_left = output[0, 1].item()
            y_left = output[0, 2].item()
            x_right = output[0, 3].item()
            y_right = output[0, 4].item()

            # centroid [x, y]
            x_centroid = (x_left + x_right) / 2
            y_centroid = (y_left + y_right) / 2
            w_rect = x_right - x_left
            h_rect = y_right - y_left

            # 4 corner points
            x_leftup = x_centroid - w_rect / 2
            y_leftup = y_centroid - h_rect / 2
            x_leftdown = x_centroid - w_rect / 2
            y_leftdown = y_centroid + h_rect / 2
            x_rightup = x_centroid + w_rect / 2
            y_rightup = y_centroid - h_rect / 2
            x_rightdown = x_centroid + w_rect / 2
            y_rightdown = y_centroid + h_rect / 2

            # the 4 corner points as [x, y] lists
            leftup = [int(x_leftup), int(y_leftup)]
            leftdown = [int(x_leftdown), int(y_leftdown)]
            rightup = [int(x_rightup), int(y_rightup)]
            rightdown = [int(x_rightdown), int(y_rightdown)]

            return leftup, leftdown, rightup, rightdown
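
Note that output[:, 1].item() only succeeds when output holds exactly one detection, and the method returns after the first image. A hedged alternative (bbox_corners_sketch is an assumed helper, not part of the snippets) that collects corners for every detected box, assuming columns 1..4 of each row hold x1, y1, x2, y2:

def bbox_corners_sketch(output):
    """Return the four corner points of every detected box.
    Assumes columns 1..4 of each row are x1, y1, x2, y2."""
    corners = []
    for row in output:
        x1, y1, x2, y2 = (int(row[i].item()) for i in range(1, 5))
        corners.append({'leftup': [x1, y1], 'leftdown': [x1, y2],
                        'rightup': [x2, y1], 'rightdown': [x2, y2]})
    return corners

For an axis-aligned box, the centroid/width/height round trip above reproduces the original corners, so indexing them directly is equivalent.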
Example #5
    def detect_classify_video(self, video_path, res_path):
        """
        detect in video frames
        """
        # myvideo = cv2.VideoCapture(video_path)
        # retval = cv2.VideoCapture.grab()
        # open the video
        videoCapture = cv2.VideoCapture(video_path)

        # get the frame rate and frame size
        fps = videoCapture.get(cv2.CAP_PROP_FPS)
        size = (int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))

        # specify the video writer format (I420 for avi, MJPG for mp4)
        videoWriter = cv2.VideoWriter(
            res_path, cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), fps, size)

        # read the first frame
        success, frame = videoCapture.read()

        while success:
            # cv2.imshow("Oto Video", frame)  # display
            cv2.waitKey(int(1000 / int(fps)))  # delay to roughly match the frame rate
            # detect on this frame
            img = cv2.cvtColor(np.asarray(frame), cv2.COLOR_BGR2RGB)  # BGR => RGB
            img2det = process_img(img, self.inp_dim)
            img2det = img2det.to(device)  # put image data to device

            # vehicle detection
            prediction = self.detector.forward(img2det, CUDA=use_cuda)

            # calculating scaling factor
            orig_img_size = list(size)
            output = self.process_predict(prediction, self.prob_th,
                                          self.num_classes, self.nms_th,
                                          self.inp_dim, orig_img_size)

            orig_img = cv2.cvtColor(np.asarray(img),
                                    cv2.COLOR_RGB2BGR)  # RGB => BGR
            if type(output) != int:
                self.cls_draw_bbox(output, orig_img)
                videoWriter.write(orig_img)  # write the video frame

            success, frame = videoCapture.read()  # read the next frame

        videoCapture.release()
        videoWriter.release()
def test_car_detect(car_cfg_path='./car.cfg',
                    car_det_weights_path='g:/Car_DR/car_360000.weights'):
    """
    imgs_path: 图像数据路径
    """
    inp_dim = 768
    prob_th = 0.2  # vehicle detection probability threshold
    nms_th = 0.4  # NMS threshold
    num_cls = 1  # detect only one class: vehicle

    # initialize the vehicle detection model and its parameters
    Net = Darknet(car_cfg_path)
    Net.load_weights(car_det_weights_path)
    Net.net_info['height'] = inp_dim  # vehicle detection input resolution
    Net.to(device)
    Net.eval()  # evaluation mode
    print('=> car detection model initiated.')

    # read image data
    img = Image.open(
        'f:/FaceRecognition_torch_0_4/imgs_21/det_2018_08_21_63_1.jpg')
    img2det = process_img(img, inp_dim)
    img2det = img2det.to(device)  # put image data to device

    # run vehicle detection
    prediction = Net.forward(img2det, CUDA=True)

    # calculate scaling factor
    orig_img_size = list(img.size)
    output = process_predict(prediction, prob_th, num_cls, nms_th, inp_dim,
                             orig_img_size)

    orig_img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)  # RGB => BGR so cv2.imshow shows correct colors
    if type(output) != int:
        # draw the detection bboxes on the original image
        draw_car_bbox(output, orig_img)

    cv2.imshow('test', orig_img)
    cv2.waitKey()
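
process_predict is also not shown. The "calculate scaling factor" comments suggest it thresholds and NMS-filters the raw prediction, then maps the surviving boxes from the letterboxed inp_dim square back to the original image. A rough sketch of that last rescaling step, assuming rows of the form [batch_idx, x1, y1, x2, y2, ...]:

import torch


def rescale_boxes_sketch(output, inp_dim, orig_img_size):
    """Map box corners from the letterboxed inp_dim square back to the
    original image. Assumes columns 1..4 hold x1, y1, x2, y2."""
    orig_w, orig_h = orig_img_size
    scale = min(inp_dim / orig_w, inp_dim / orig_h)
    pad_x = (inp_dim - orig_w * scale) / 2          # horizontal letterbox padding
    pad_y = (inp_dim - orig_h * scale) / 2          # vertical letterbox padding

    output[:, [1, 3]] = (output[:, [1, 3]] - pad_x) / scale
    output[:, [2, 4]] = (output[:, [2, 4]] - pad_y) / scale

    # clamp to the image bounds
    output[:, [1, 3]] = output[:, [1, 3]].clamp(0.0, orig_w)
    output[:, [2, 4]] = output[:, [2, 4]].clamp(0.0, orig_h)
    return output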
Example #7
    def detect_classify(self):
        """        
        detect and classify
        """
        #read and save video
        cap = cv2.VideoCapture(self.video_path)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(self.dst_path, fourcc, 25.0, (960, 540))
        
        while cap.isOpened():
            # read image data
            ret, x = cap.read()
            if not ret:  # stop once the video is exhausted
                break
            re_img = cv2.resize(x, (960, 540), interpolation=cv2.INTER_LINEAR)
            img = Image.fromarray(cv2.cvtColor(re_img, cv2.COLOR_BGR2RGB))
            img2det = process_img(img, self.inp_dim)
            img2det = img2det.to(device)  # put image data to device

            # vehicle detection
            prediction = self.detector.forward(img2det, CUDA=True)

            # calculating scaling factor
            orig_img_size = list(img.size)
            output = self.process_predict(prediction,
                                          self.prob_th,
                                          self.num_classes,
                                          self.nms_th,
                                          self.inp_dim,
                                          orig_img_size)

            orig_img = cv2.cvtColor(np.asarray(
                img), cv2.COLOR_RGB2BGR)  # RGB => BGR
            if type(output) != int:
                orig_img = self.cls_draw_bbox(output, orig_img)
                orig_img = self.car_recognition(output, orig_img)
            out.write(orig_img)
        cap.release()
        out.release()
Example #8
    def detect_classify(self, query_pair):
        pre_path = ''
        color_dict = {}
        type_dict = {}
        # cars = []
        # all_cars_per_camera = {}
        index_list_all = []
        index_list_per_camera = []

        pre_camera_id = self.imgs_path[0].split('/')[3]

        stream_i = 0
        print("\n\nProcessing stream %d...\n" % stream_i)

        tracklet_i = 0
        """
        detect and classify
        """
        for x in self.imgs_path:
            curr_path = os.path.split(x)[0]

            # read image data
            img = cv2.imread(x)
            img = cv2.copyMakeBorder(img,
                                     BORDER,
                                     BORDER,
                                     BORDER,
                                     BORDER,
                                     cv2.BORDER_CONSTANT,
                                     value=(100, 100, 100))
            img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # imread gives BGR; PIL expects RGB

            img2det = process_img(img, self.inp_dim)
            img2det = img2det.to(device)  # put image data to device

            # vehicle detection
            prediction = self.detector.forward(img2det, CUDA=True)

            # calculating scaling factor
            orig_img_size = list(img.size)
            output = self.process_predict(prediction, self.prob_th,
                                          self.num_classes, self.nms_th,
                                          self.inp_dim, orig_img_size)

            orig_img = cv2.cvtColor(np.asarray(img),
                                    cv2.COLOR_RGB2BGR)  # RGB => BGR
            car_color, car_type = None, None  # defaults when nothing is detected in this frame
            if type(output) != int:
                # print('\n', x)
                car_color, car_direction, car_type = self.cls_draw_bbox(
                    output, orig_img)
                dst_path = self.dst_dir + '/' + os.path.split(x)[1]
                # if not os.path.exists(dst_path):
                # cv2.imwrite(dst_path, orig_img)

            if curr_path != pre_path and pre_path != '':
                start_length = os.path.split(os.path.split(pre_path)[0])[1]
                detect_color = max(color_dict, key=color_dict.get)
                detect_type = max(type_dict, key=type_dict.get)
                print("Tracklet %d detects " % tracklet_i, detect_color,
                      detect_type)
                # add_to_all(all_cars_per_camera, detect_color, detect_type)
                compare_query_append(query_pair, detect_color, detect_type,
                                     index_list_per_camera, tracklet_i,
                                     start_length)
                tracklet_i += 1

                color_dict.clear()
                type_dict.clear()

                curr_camera_id = x.split('/')[3]
                if curr_camera_id != pre_camera_id:
                    print("The query result on stream %d:" % stream_i,
                          index_list_per_camera)
                    index_list_all.append(deepcopy(index_list_per_camera))
                    index_list_per_camera.clear()

                    pre_camera_id = curr_camera_id

                    stream_i += 1
                    tracklet_i = 0
                    print("\n\nProcessing stream %d...\n" % stream_i)

            if car_color is not None:
                if car_color not in color_dict:
                    color_dict[car_color] = 0
                color_dict[car_color] += 1

            if car_type is not None:
                if car_type not in type_dict:
                    type_dict[car_type] = 0
                type_dict[car_type] += 1

            pre_path = curr_path

        # add the last one
        if pre_path != '':
            start_length = os.path.split(os.path.split(pre_path)[0])[1]
            detect_color = max(color_dict, key=color_dict.get)
            detect_type = max(type_dict, key=type_dict.get)
            print("Tracklet %d detects " % tracklet_i, detect_color,
                  detect_type)
            compare_query_append(query_pair, detect_color, detect_type,
                                 index_list_per_camera, tracklet_i,
                                 start_length)
            # print(all_cars_per_camera)
            color_dict.clear()
            type_dict.clear()

            print("The query result on stream %d:" % stream_i,
                  index_list_per_camera)
            index_list_all.append(deepcopy(index_list_per_camera))

        return index_list_all
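
The per-tracklet decision above is a majority vote over the per-frame colors and types (max(color_dict, key=color_dict.get)). The same vote with collections.Counter, shown only as an illustration, also sidesteps the ValueError that max raises when a tracklet produced no detections at all:

from collections import Counter


def majority_vote_sketch(labels):
    """Return the most frequent non-None label in a tracklet, or None if
    nothing was detected on any of its frames."""
    counts = Counter(label for label in labels if label is not None)
    return counts.most_common(1)[0][0] if counts else None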