Example no. 1
def display(preds, imgs, imshow=True, imwrite=False):
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue

        imgs[i] = imgs[i].copy()

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            plot_one_box(imgs[i], [x1, y1, x2, y2],
                         label=obj,
                         score=score,
                         color=color_list[get_index_label(obj, obj_list)])

        if imshow:
            cv2.imshow('img', imgs[i])
            cv2.waitKey(0)

        if imwrite:
            # cv2.imwrite(f'test/img_inferred_d{compound_coef}_this_repo_{i}.jpg', imgs[i])
            cv2.imwrite(
                f'test_result/d0_epoch71_1031/img_inferred_d{compound_coef}_this_repo_{i}.jpg',
                imgs[i])
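
All of these display helpers delegate the actual drawing to a plot_one_box utility that none of the snippets define. Below is a minimal sketch of what such a helper might look like, assuming OpenCV and the (img, coord, label, score, color) calling convention used above; it is an illustration, not the original implementation:

import cv2

def plot_one_box(img, coord, label=None, score=None, color=(0, 255, 0),
                 line_thickness=2):
    # Draw the rectangle for a single detection.
    x1, y1, x2, y2 = map(int, coord)
    cv2.rectangle(img, (x1, y1), (x2, y2), color, line_thickness)
    # Optionally draw "label score" in a filled box above the top-left corner.
    if label is not None:
        text = f'{label} {score:.2f}' if score is not None else label
        (tw, th), _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        cv2.rectangle(img, (x1, y1 - th - 4), (x1 + tw, y1), color, -1)
        cv2.putText(img, text, (x1, y1 - 2), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 0, 0), 1)
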
Example no. 2
def display(cur_frame, preds, imgs, imshow=True, imwrite=False):
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue

        imgs[i] = imgs[i].copy()

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            plot_one_box(imgs[i], [x1, y1, x2, y2],
                         label=obj,
                         score=score,
                         color=color_list[get_index_label(obj, obj_list)])

        if imshow:
            cv2.imshow('img', imgs[i])
            #cv2.waitKey(0)

        if imwrite:
            if not os.path.exists(img_path):
                os.makedirs(img_path)
            cv2.imwrite(
                f'{img_path}/img_inferred_d{compound_coef}_this_repo_{cur_frame}.jpg',
                imgs[i])
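
For context, these display helpers are normally the last step of an EfficientDet inference loop like the one shown in Example no. 29 further down. The following is a hedged sketch of that surrounding loop; preprocess, postprocess, invert_affine, BBoxTransform and ClipBoxes are taken from that example, and the exact arguments used here are an assumption rather than a verified API:

import torch

# Assumed to exist in the surrounding code base (see Example no. 29).
ori_imgs, framed_imgs, framed_metas = preprocess(img_path, max_size=input_size)
x = torch.stack([torch.from_numpy(fi) for fi in framed_imgs], 0)
x = x.to(torch.float32).permute(0, 3, 1, 2)

with torch.no_grad():
    features, regression, classification, anchors = model(x)
    preds = postprocess(x, anchors, regression, classification,
                        BBoxTransform(), ClipBoxes(),
                        threshold, iou_threshold)

preds = invert_affine(framed_metas, preds)  # map boxes back to the original image size
display(preds, ori_imgs, imshow=True, imwrite=False)
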
Example no. 3
def display(preds, imgs, imshow=True, showtime=0, imwrite=False):
    for i, img_name in zip(range(len(imgs)), img_names):
        # if len(preds[i]['rois']) == 0:                    # if the model doesn't detect any object, skip showing the image
        #     continue

        imgs[i] = imgs[i].copy()

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            plot_one_box(imgs[i], [x1, y1, x2, y2],
                         label=obj,
                         score=score,
                         color=color_list[get_index_label(obj, obj_list)])
            print(obj)

        if imwrite:
            img = cv2.cvtColor(imgs[i], cv2.COLOR_BGR2RGB)
            cv2.imwrite(f'test/img_inferred_d{compound_coef}_{img_name}', img)

        if imshow:
            img = cv2.cvtColor(imgs[i], cv2.COLOR_BGR2RGB)
            cv2.namedWindow(f'{img_name}', cv2.WINDOW_NORMAL)
            cv2.imshow(f'{img_name}', img)
            cv2.waitKey(showtime)
            cv2.destroyAllWindows()
Example no. 4
    def display(preds, imgs, imshow=True, imwrite=False, img_id=1):
        for i in range(len(imgs)):
            if len(preds[i]['rois']) == 0:
                continue

            imgs[i] = imgs[i].copy()
            imgs[i] = cv2.cvtColor(imgs[i], cv2.COLOR_BGR2RGB)

            for j in range(len(preds[i]['rois'])):
                x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
                obj = obj_list[preds[i]['class_ids'][j]]
                score = float(preds[i]['scores'][j])
                plot_one_box(imgs[i], [x1, y1, x2, y2],
                             label=obj,
                             score=score,
                             color=color_list[get_index_label(obj, obj_list)])

            if imshow:
                cv2.imshow('img', imgs[i])
                cv2.waitKey(0)

            if imwrite:

                str1 = 'test/' + str(img_id) + '.jpg'
                cv2.imwrite(str1, imgs[i])
Example no. 5
def display(preds, imgs, imshow=True, imwrite=False):
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            plot_one_box(imgs[i], [x1, y1, x2, y2],
                         label=obj,
                         score=score,
                         color=color_list[get_index_label(obj, obj_list)])

        if imshow:
            #---------- The lines below crash in Colab; plt must be used instead -------------
            #cv2.imshow('img', imgs[i])
            #cv2.waitKey(0)
            #------------------------------------------------------
            plt.imshow(imgs[i])
            plt.show()

        if imwrite:
            cv2.imwrite(
                f'test/img_inferred_d{compound_coef}_this_repo_{i}.jpg',
                imgs[i])
Example no. 6
    def display(self, preds, imgs, imshow=True, imwrite=False):
        scores = []
        labels = []
        bboxes = []
        for i in range(len(imgs)):
            if len(preds[i]['rois']) == 0:
                continue
            for j in range(len(preds[i]['rois'])):
                x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
                obj = self.system_dict["params"]["obj_list"][preds[i]
                                                             ['class_ids'][j]]
                score = float(preds[i]['scores'][j])
                if (score > self.system_dict["params"]["threshold"]):
                    scores.append(score)
                    labels.append(obj)
                    bboxes.append([x1, y1, x2, y2])
                    plot_one_box(imgs[i], [x1, y1, x2, y2],
                                 label=obj,
                                 score=score,
                                 color=self.system_dict["local"]["color_list"]
                                 [get_index_label(
                                     obj,
                                     self.system_dict["params"]["obj_list"])])

            if imshow:
                cv2.imshow('img', imgs[i])
                cv2.waitKey(0)

            if imwrite:
                cv2.imwrite('output.jpg', imgs[i])

        return scores, labels, bboxes
Example no. 7
def display(preds, imgs, imshow=True, imwrite=False, write_dir=None):
    # for i in range(len(imgs)):
    for i, (name, img) in enumerate(imgs.items()):
        if len(preds[i]['rois']) == 0:
            continue

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            plot_one_box(
                img,  #imgs[i],
                [x1, y1, x2, y2],
                label=obj,
                score=score,
                color=color_list[get_index_label(obj, obj_list)])

        if imshow:
            cv2.imshow('img', img)  # imgs is a dict here, so show the unpacked frame
            cv2.waitKey(0)

        if imwrite:
            os.makedirs(write_dir, exist_ok=True)
            write_path = os.path.join(write_dir, f'{name}.jpg')
            cv2.imwrite(write_path, img)
Example no. 8
    def display(preds, imgs, imshow=True, imwrite=False):
        for i in range(len(imgs)):
            if len(preds[i]['rois']) == 0:
                continue

            for j in range(len(preds[i]['rois'])):
                x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
                obj = obj_list[preds[i]['class_ids'][j]]
                score = float(preds[i]['scores'][j])
                plot_one_box(imgs[i], [x1, y1, x2, y2],
                             label=obj,
                             score=score,
                             color=color_list[get_index_label(obj, obj_list)])

            if imshow:
                cv2.imshow('img', imgs[i])
                cv2.waitKey(0)

            if imwrite:
                img = img_path.split('/')[-1].split('.')[0]
                #                 pdb.set_trace()
                img_name = f'{img}_img_inferred_d{compound_coef}_this_repo_{i}.jpg'
                img_save_path = os.path.join(img_save_folder, img_name)
                print(img_save_path)
                cv2.imwrite(img_save_path, imgs[i])
Example no. 9
def run(img_dir, output_dir, img_size, num_classes, weights, conf_thres,
        nms_thres, show):
    shutil.rmtree(output_dir, ignore_errors=True)
    os.makedirs(output_dir, exist_ok=True)
    model = YOLOV3(num_classes, img_size)
    state_dict = torch.load(weights, map_location='cpu')
    model.load_state_dict(state_dict['model'])
    model = model.to(device)
    model.eval()
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(80)]
    names = [n for n in os.listdir(img_dir) if osp.splitext(n)[1] in IMG_EXT]
    names.sort()
    for name in tqdm(names):
        img = cv2.imread(osp.join(img_dir, name))
        det = inference(model, [img], img_size, conf_thres, nms_thres)[0]
        det_txt = []
        # Write results
        for *xyxy, conf, _, cls in det:
            det_txt.append(' '.join(['%g'] * 6) % (*xyxy, cls, conf))
            if show:  # Add bbox to image
                label = '%d %.2f' % (int(cls), conf)
                plot_one_box(xyxy, img, label=label, color=colors[int(cls)])
        with open(osp.join(output_dir,
                           osp.splitext(name)[0] + '.txt'), 'w') as f:
            f.write('\n'.join(det_txt))
        # Stream results
        if show:
            cv2.imshow('yolo', img)
            cv2.waitKey(1)
        # Save results (image with detections)
        cv2.imwrite(osp.join(output_dir, name), img)
Example no. 10
def box(preds, imgs, color_list, obj_list, imshow=False, imwrite=False):
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue

        imgs[i] = imgs[i].copy()

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            plot_one_box(imgs[i], [x1, y1, x2, y2],
                         label=obj,
                         score=score,
                         color=color_list[get_index_label(obj, obj_list)])


    return imgs[0]
Example no. 11
 def draw_result(self, img, det, show=False):
     if det is None or len(det) == 0:
         if show:
             locks.imshow('result', img)
         return
     for det_pack in det:
         xyxy = []
         for c in [det_pack.x1, det_pack.y1, det_pack.x2, det_pack.y2]:
             xyxy.append(c)
         conf = det_pack.class_conf
         label = '%s %.2f %.2f' % (det_pack.class_name, conf,
                                   det_pack.object_conf)
         plot_one_box(xyxy, img, label=label, color=det_pack.color)
     if show:
         locks.imshow('result', img)
Example no. 12
def display(preds,
            imgs,
            compound_coef,
            obj_list=None,
            imshow=True,
            imwrite=False,
            debug=False):
    if obj_list is None:
        obj_list = ['person']
    color_list = standard_to_bgr(STANDARD_COLORS)

    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            if debug:
                cv2.imshow('img', imgs[i])
                cv2.waitKey(0)
            continue

        imgs[i] = imgs[i].copy()

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            plot_one_box(imgs[i], [x1, y1, x2, y2],
                         label=obj,
                         score=score,
                         color=color_list[get_index_label(obj, obj_list)])

        if imshow:
            cv2.imshow('img', imgs[i])
            cv2.waitKey(0)

        if imwrite:
            os.system("mkdir -p ./assets/predictions")
            cv2.imwrite(
                f'./assets/predictions/img_inferred_d{compound_coef}_this_repo_{i}.jpg',
                imgs[i])

    if imwrite:
        image_folder = './assets/predictions'
        image_files = [
            image_folder + '/' + img for img in os.listdir(image_folder)
            if img.endswith(".jpg")
        ]
        clip = moviepy.video.io.ImageSequenceClip.ImageSequenceClip(
            image_files, fps=1)
        clip.write_videofile('./assets/predictions_testset.mp4')
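
One caveat about the stitching step above: os.listdir returns files in arbitrary order, so the frame paths should normally be sorted before being handed to ImageSequenceClip. A sketch (lexicographic sort; a numeric key may be needed if frame indices are not zero-padded):

image_files = sorted(
    os.path.join(image_folder, f) for f in os.listdir(image_folder)
    if f.endswith('.jpg'))
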
Example no. 13
def display(preds, imgs, base_name, imshow=True, imwrite=False):
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            plot_one_box(imgs[i], [x1, y1, x2, y2],
                         label=obj,
                         score=score,
                         color=color_list[get_index_label(obj, obj_list)])


        if imshow:
            cv2.imshow('img', imgs[i])
            cv2.waitKey(0)

        if imwrite:
            cv2.imwrite(f'test003/img_inferred_d{compound_coef}_{base_name}.jpg', imgs[i])
Example no. 14
def display(preds, imgs, imwrite=True):
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            plot_one_box(imgs[i], [x1, y1, x2, y2],
                         label=obj,
                         score=score,
                         color=color_list[get_index_label(obj, obj_list)])

        if imwrite:
            cv2.imwrite(
                f'test/img_inferred_d{compound_coef}_this_repo_{i}.jpg',
                imgs[i])
Example no. 15
 def __save_image(self, preds, imgs, imwrite=True):
     color_list = standard_to_bgr(STANDARD_COLORS)
     for i in range(len(imgs)):
         if len(preds[i]['rois']) == 0:
             continue
         imgs[i] = imgs[i].copy()
         for j in range(len(preds[i]['rois'])):
             x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
             obj = self.obj_list[preds[i]['class_ids'][j]]
             score = float(preds[i]['scores'][j])
             plot_one_box(imgs[i], [x1, y1, x2, y2],
                          label=obj,
                          score=score,
                          color=color_list[get_index_label(
                              obj, self.obj_list)])
         if imwrite:
             cv2.imwrite(
                 f'test/img_inferred_d{self.compound_coef}_this_repo_{i}.jpg',
                 imgs[i])
Example no. 16
def display(preds, imgs, imshow=True, imwrite=False):
    for i, img_name in zip(range(len(imgs)), img_names):

        imgs[i] = imgs[i].copy()

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            plot_one_box(imgs[i], [x1, y1, x2, y2], label=obj, score=score,
                         color=color_list[get_index_label(obj, obj_list)])

        if imshow:
            img = cv2.cvtColor(imgs[i], cv2.COLOR_BGR2RGB)
            cv2.imshow('video', img)
            cv2.waitKey(1)

        if imwrite:
            cv2.imwrite(f'test/img_inferred_d{compound_coef}_{img_name}', imgs[i])
Example no. 17
def display(preds, imgs, img_path, imshow=True, imwrite=False):
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            plot_one_box(imgs[i], [x1, y1, x2, y2],
                         label=obj,
                         score=score,
                         color=color_list[get_index_label(obj, obj_list)])


        if imshow:
            cv2.imshow('img', imgs[i])
            cv2.waitKey(0)

        if imwrite:
            write_path = img_path.split(".")[0] + "_predicted.jpg"
            cv2.imwrite(write_path, imgs[i])
Example no. 18
    def draw(self, frames_dict, preds_dict, judgements_dict, fps):
        vis_imgs_dict = {}
        for name in preds_dict.keys():
            frame = frames_dict[name]
            pred = preds_dict[name]
            judgement = judgements_dict[name]

            img = self.draw_static_contents(frame, name)
            img = self.draw_fps(img, fps)

            label = 'person'
            if pred is not None:
                for x1, y1, x2, y2 in pred:
                    box_area = (x2 - x1) * (y2 - y1)
                    # Filter out detection boxes that are too large or too small
                    if min_object_bbox_area_dict[
                            name] <= box_area <= max_object_bbox_area_dict[
                                name]:
                        plot_one_box((x1, y1, x2, y2),
                                     img,
                                     label=label,
                                     color=(225, 225, 0))

            if judgement:
                img = cv2.putText(img,
                                  text='Kick your head!!!',
                                  org=(30, 25),
                                  fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                  fontScale=1.2,
                                  color=(0, 0, 255),
                                  thickness=2)
            else:
                img = cv2.putText(img,
                                  text='Safe working',
                                  org=(30, 25),
                                  fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                  fontScale=1.2,
                                  color=(0, 255, 0),
                                  thickness=2)
            vis_imgs_dict[name] = img
        return vis_imgs_dict
Example no. 19
def display(preds, imgs, outdir, files, imshow=True, imwrite=False):
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            plot_one_box(imgs[i], [x1, y1, x2, y2],
                         label=obj,
                         score=score,
                         color=color_list[get_index_label(obj, obj_list)])

        if imshow:
            cv2.imshow('img', imgs[i])
            cv2.waitKey(0)

        if imwrite:
            out_name = outdir + files[i]
            cv2.imwrite(out_name, imgs[i])
Example no. 20
def display(out_1, out_2, imgs, imshow=True, showtime=0, imwrite=False):
    # if len(preds[i]['rois']) == 0:                    # if the model doesn't detect any object, skip showing the image
    #     continue

    for img, out_1 in zip(imgs, out_1):
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        for i in range(len(out_1['rois'])):
            ox1, oy1, ox2, oy2 = out_1['rois'][i].astype(int)
            obj_1 = obj_list_1[out_1['class_ids'][i]]
            score = float(out_1['scores'][i])
            color = color_list[get_index_label(obj_1, obj_list_1)]
            plot_one_box(img, [ox1, oy1, ox2, oy2],
                         label=obj_1,
                         score=score,
                         color=color)
            print(obj_1)
            print(f'obj {i}:', ox1, oy1, ox2, oy2)
            for j in range(len(out_2[i]['rois'])):
                dx1, dy1, dx2, dy2 = out_2[i]['rois'][j].astype(int)
                obj_2 = obj_list_2[out_2[i]['class_ids'][j]].split('_')[-1]
                score = float(out_2[i]['scores'][j])
                plot_one_box(img, [dx1 + ox1, dy1 + oy1, dx2 + ox1, dy2 + oy1],
                             label=obj_2,
                             score=score,
                             color=color)
                print(obj_2)
                print('damage :', dx1, dy1, dx2, dy2)
                print('change :', dx1 + ox1, dy1 + oy1, dx2 + ox1, dy2 + oy1)

        if imshow:
            # print(f'{img_name}')
            cv2.namedWindow('__', cv2.WINDOW_NORMAL)
            cv2.resizeWindow('__', 1500, 1000)
            cv2.imshow('__', img)
            # cv2.waitKey(0)
            key = cv2.waitKey(showtime)
            if key == ord('p'):
                cv2.waitKey(-1)
Example no. 21
def display(names, preds, imgs, imshow=True, imwrite=False, save_bbox=True):
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue
        f = open('test/' + names[i] + '.txt', "w")
        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            plot_one_box(imgs[i], [x1, y1, x2, y2],
                         label=obj,
                         score=score,
                         color=color_list[get_index_label(obj, obj_list)])
            if save_bbox and obj == 'cell phone':
                f.write(
                    str(67) + ' ' + str(score) + ' ' + str(x1) + ' ' +
                    str(y1) + ' ' + str(x2) + ' ' + str(y2) + '\n')

        if imshow:
            cv2.imshow(names[i], imgs[i])
            cv2.waitKey(0)
        if imwrite:
            cv2.imwrite('test/' + names[i] + '.jpg', imgs[i])
Example no. 22
def display(preds, imgs, imshow=True, imwrite=True):
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue

        imgs[i] = imgs[i].copy()

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])

            plot_one_box(imgs[i], [x1, y1, x2, y2], label=obj, score=score,
                         color=color_list[get_index_label(obj, obj_list)])

        if imshow:
            imgs[i] = cv2.cvtColor(imgs[i], cv2.COLOR_BGR2RGB)
            cv2.imshow('img', imgs[i])
            cv2.waitKey(0)

        if imwrite:
            cv2.imwrite(f'test/d{compound_coef}_{haha}', imgs[i])
            print('nmsl')
Example no. 23
def analyse_rgb():
    t = time.time()
    img0 = get_bgr()
    img_org = img0[:, :, ::-1]
    img, _, _, _ = letterbox(img0, new_shape=image_size)
    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB
    img = np.ascontiguousarray(img, dtype=np.float32)
    img /= 255.0

    img = torch.from_numpy(img).unsqueeze(0).to('cpu')
    pred, _ = model(img)

    det = non_max_suppression(pred, 0.6, 0.5)[0]

    if det is not None and len(det) > 0:
        detected_classes = []
        print('+ Rescaling model')
        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()

        for *coordinates, conf, cls_conf, cls in det:
            if classes[int(cls)] in RISKY_CLASSES:
                label = '%s %.2f' % (classes[int(cls)], conf)
                plot_one_box(coordinates, img0, label=label, color=colors[int(cls)])
                print(f"+ Detected {classes[int(cls)]}")
                detected_classes.append({classes[int(cls)]: {'x': coordinates[0], 'y': coordinates[1]}})

        n = []
        for counter in detected_classes:
            width = img0.shape[1]
            x, y = counter[list(counter.keys())[0]].values()
            phi = (x / width * 2 - 1) * (CAMERA_FOV / 2)
            n.append(f"{list(counter.keys())[0]};{phi};-1|")

        s = str(''.join(str(x) for x in n)[:-1])

        return {"raw": get_rgb(), "done": img0, "objects": s}
    return {'raw': img_org, 'done': img_org, 'objects': ''}
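
As a quick check of the bearing formula above: x / width * 2 - 1 maps the detection's x coordinate to [-1, 1] relative to the image centre, and multiplying by CAMERA_FOV / 2 converts that to an angle. With made-up values (640-pixel-wide frame, 90-degree field of view, detection at x = 480):

CAMERA_FOV = 90        # assumed horizontal field of view, degrees
width, x = 640, 480    # example frame width and detection x coordinate
phi = (x / width * 2 - 1) * (CAMERA_FOV / 2)
print(phi)             # (1.5 - 1) * 45 = 22.5 degrees right of centre
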
Example no. 24
    def display(preds, imgs, imshow=True, imwrite=False):
        for i in range(len(imgs)):
            if len(preds[i]['rois']) == 0:
                continue

            imgs[i] = imgs[i].copy()

            for j in range(len(preds[i]['rois'])):
                x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
                obj = obj_list[preds[i]['class_ids'][j]]
                score = float(preds[i]['scores'][j])

                list_image_id.append(img_path.split('/')[-1])
                list_class.append(obj)
                list_score.append(score)
                list_xmin.append(x1)
                list_ymin.append(y1)
                list_xmax.append(x2)
                list_ymax.append(y2)

                plot_one_box(imgs[i], [x1, y1, x2, y2],
                             label=obj,
                             score=score,
                             color=color_list[get_index_label(obj, obj_list)])
Example no. 25
def display(preds, imgs, imshow=True, imwrite=False):
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            print(x1, y1, x2, y2, obj, score)
            plot_one_box(imgs[i], [x1, y1, x2, y2],
                         label=obj,
                         score=score,
                         color=color_list[get_index_label(obj, obj_list)])

        if imshow:
            cv2.imshow('img', imgs[i])
            cv2.waitKey(0)

        if imwrite:
            predicated_path = 'demo_jpg/{0}_infer.jpg'.format(
                config.dataset_name)
            cv2.imwrite(predicated_path, imgs[i])
            print("write predicated result in:{0}".format(predicated_path))
Example no. 26
def display(preds, imgs, imshow=True, imwrite=False, start=0):
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            if obj in obj_interest:
                plot_one_box(imgs[i], [x1, y1, x2, y2],
                             label=obj,
                             score=score,
                             color=color_list[get_index_label(obj, obj_list)])

        if imshow:
            cv2.imshow('img', imgs[i])
            cv2.waitKey(0)

        if imwrite:
            cv2.imwrite(
                img_root + 'Result' +
                f'/img_inferred_d{compound_coef}_this_repo_{i+start}.jpg',
                imgs[i])
Example no. 27
def display(preds, imgs, imshow=True, imwrite=False):
    x = []
    y = []
    result_1 = []
    result_2 = []
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue

        for j in range(len(preds[i]['rois'])):
            x1, y1, x2, y2 = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])
            x, y = plot_one_box(imgs[i], [x1, y1, x2, y2],
                                label=obj,
                                score=score,
                                color=color_list[get_index_label(
                                    obj, obj_list)])
            result_1.append(x)
            result_2.append(y)

    return result_1, result_2
Example no. 28
    def __find_intersections_v1(self):
        for flag_idx in self.class_det["Flag"]:

            # 1. Collect total number of intersection with detected Person object
            detected_intersection = []
            for person_idx in self.class_det["Person"]:
                flag_xyxy = get_det_xyxy(self.det[flag_idx])
                person_xyxy = get_det_xyxy(self.det[person_idx])
                if not self.opt.maximize_latency:
                    if self.plot_old_person_bbox:  # Plot in bbox
                        plot_one_box(person_xyxy,
                                     self.img_mbbox,
                                     label="Person-%s" % str(person_idx),
                                     color=self.rgb["MMBox"])

                person_xyxy = self.__enlarge_bbox(
                    person_xyxy)  # enlarge bbox size

                # Intersection occurs here
                if self.__is_intersect(flag_xyxy, person_xyxy):
                    detected_intersection.append(person_idx)

                # Testing only: Try plotting bounding boxes
                if not self.opt.maximize_latency:
                    plot_one_box(person_xyxy,
                                 self.img_enlarge,
                                 label="EnPer-%s" % str(person_idx),
                                 color=self.rgb["EnlargedPerson"])
                    plot_one_box(flag_xyxy,
                                 self.img_enlarge,
                                 label="Flag-%s" % str(flag_idx),
                                 color=self.rgb["Person"])

            self.__verify_intersection(flag_idx, detected_intersection)

        # save MB-Box illustration
        # print(" >>>>> self.save_path = ", self.save_path)
        # cv2.imwrite(self.save_path+, self.img_mbbox)
        cv2.imwrite(
            self.save_path.replace('.png', '') + "-mbbox.png", self.img_mbbox)
        cv2.imwrite(
            self.save_path.replace('.png', '') + "-enlarge.png",
            self.img_enlarge)
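
The __is_intersect helper called above is not part of this snippet. A minimal sketch of the usual axis-aligned overlap test it presumably performs, with boxes given as [x1, y1, x2, y2] (an assumption about the original implementation):

def boxes_intersect(box_a, box_b):
    # Two axis-aligned boxes overlap iff their projections overlap on both axes.
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b
    return ax1 < bx2 and bx1 < ax2 and ay1 < by2 and by1 < ay2
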
Example no. 29
def detect(model, dataset, args):
    use_cuda = not args.cpu
    threshold = args.threshold
    iou_threshold = args.iou_threshold
    input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536, 1536]
    input_size = input_sizes[args.compound_coef]

    img_dir = os.path.join(dataset, dataset, 'images')
    bbox_dir = os.path.join(dataset, dataset, 'annotations', 'bboxes')
    vis_dir = os.path.join(dataset, 'det_vis')
    prepare_dirs(bbox_dir, vis_dir)

    img_paths = [os.path.join(img_dir, f) for f in os.listdir(img_dir)]
    for img_path in tqdm(img_paths):
        ori_imgs, framed_imgs, framed_metas = preprocess(img_path,
                                                         max_size=input_size)
        ori_img = ori_imgs[0]
        img_id = os.path.basename(img_path).split('.')[0]

        json_byhand = os.path.join(dataset, 'annotation_byhand',
                                   img_id + '.json')
        if os.path.exists(json_byhand):
            with open(json_byhand) as f:
                annotation_byhand = json.load(f)
                points = annotation_byhand['shapes'][0]['points']
                max_box = points[0] + points[1]
        else:
            if args.update:  # only process annotations by hand
                continue
            if use_cuda:
                x = torch.stack(
                    [torch.from_numpy(fi).cuda() for fi in framed_imgs], 0)
            else:
                x = torch.stack([torch.from_numpy(fi) for fi in framed_imgs],
                                0)

            x = x.to(torch.float32).permute(0, 3, 1, 2)

            with torch.no_grad():
                features, regression, classification, anchors = model(x)

                regressBoxes = BBoxTransform()
                clipBoxes = ClipBoxes()

                preds = postprocess(x, anchors, regression, classification,
                                    regressBoxes, clipBoxes, threshold,
                                    iou_threshold)

                pred = invert_affine(framed_metas, preds)[0]

            max_area, max_box = 0, [0, 0, ori_img.shape[1], ori_img.shape[0]]
            for det, class_id in zip(pred['rois'], pred['class_ids']):
                if not class_id == 0:
                    continue
                x1, y1, x2, y2 = det.astype(int)
                w, h = x2 - x1, y2 - y1
                area = w * h
                if area > max_area:
                    max_area = area
                    max_box = [x1, y1, x2, y2]

        plot_one_box(ori_img, max_box, color=[255, 0, 255], line_thickness=2)
        if args.vis:
            cv2.imwrite(os.path.join(vis_dir, img_id + '.jpg'), ori_img)

        bbox_file = os.path.join(bbox_dir, img_id + '.txt')
        with open(bbox_file, 'w') as f:
            bbox_info = ' '.join(map(str, max_box))
            f.write(bbox_info)
Example no. 30
from detector import YoloV3Predictor
import sys
from PIL import Image
import numpy as np
import json
import random
from utils.utils import plot_one_box

if __name__ == '__main__':
    model = YoloV3Predictor(0, 416, half_precision=False)
    if len(sys.argv) == 1:
        img = np.array(Image.open('data/samples/bus.jpg'))
    else:
        img = np.array(Image.open(sys.argv[1]))

    res = model.predict(img, conf_thres=0.5, agnostic_iou=False)

    with open('cfg/labels.json', 'r') as f:
        label_dict = json.load(f)
    colors = [[random.randint(0, 255) for _ in range(3)]
              for _ in range(len(label_dict))]
    for *xyxy, conf, cls in res[0]:
        label = '%s %.2f' % (list(label_dict.keys())[int(cls)], conf)
        plot_one_box(xyxy, img, label=label, color=colors[int(cls)])
    Image.fromarray(img).save('output/result.jpg')