Example #1
def main():
    flg_det = PdxDet(model_path="../model/best/flg_det/", bs=8)
    for vid_name in os.listdir(args.input):
        print(vid_name)
        frame_data = []
        names = []
        name = vid_name.split(".")[0]
        video = Stream(
            osp.join(args.input, vid_name),
            osp.join(args.ann, name + ".txt"),
            itv_sparse=0,
            itv_dense=5,
        )

        for frame_idx, img in tqdm(video):
            # print(idx)

            # Method 1: the inference class assembles the batch itself
            frames, idxs, bbs = flg_det.add(img, frame_idx)
            # print(bbs)
            for f, idx, bb in zip(frames, idxs, bbs):
                if len(bb) != 0:
                    bb = bb[0]  # this network detects at most one landing gear
                    save_path = osp.join(args.output,
                                         "{}-{}.png".format(name, idx))
                    try:
                        cv2.imwrite(save_path, crop(f, bb.square(256)))
                    except Exception:
                        pass  # skip frames where cropping or writing fails
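These snippets read their paths from a module-level `args` object that is defined outside the excerpt. Below is a minimal sketch of the argument parsing it implies, assuming standard argparse; only the attribute names (`input`, `ann`, `output`) come from the code above, while the flag names and help texts are guesses.

import argparse

# Hypothetical argument parsing; only the attribute names are known from usage.
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="directory containing the input videos")
parser.add_argument("--ann", help="directory with per-video .txt annotations")
parser.add_argument("--output", help="directory where cropped patches are written")
args = parser.parse_args()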
Example #2
def main():
    flg_det = PdxDet(model_path="../model/best/flg_det/", bs=args.bs)
    for vid_name in os.listdir(args.input):
        print(vid_name)
        name = vid_name.split(".")[0]
        video = Stream(
            osp.join(args.input, vid_name),
            # osp.join(args.ann, name + ".txt"),
            itv_sparse=2,
            # itv_dense=2,
        )
        os.mkdir(osp.join(args.output, "p" + vid_name))

        for frame_idx, img in tqdm(video, miniters=args.bs):
            frames, idxs, bbs = flg_det.add(img, frame_idx)
            for f, idx, bb in zip(frames, idxs, bbs):
                # print(bb)
                if len(bb) != 0:
                    bb = bb[0]  # this network detects at most one landing gear
                    save_path = osp.join(
                        args.output,
                        "p" + vid_name,
                        "{}-{}.png".format(name, idx),
                    )
                    try:
                        cv2.imwrite(save_path, crop(f, bb.square(256)))
                    except Exception:
                        pass  # skip frames where cropping or writing fails
Example #3
def main():
    # 1. Define the model objects
    flg_det = PdxDet(model_path="../model/best/flg_det/", bs=4)
    person_det = PdxDet(model_path="../model/best/person_det_yolov3", bs=8, autoflush=False)

    for vid_name in os.listdir(args.input):
        print(vid_name)
        os.mkdir(osp.join(args.output, vid_name))

        video = Stream(osp.join(args.input, vid_name), osp.join(args.time, os.listdir(args.time)[0]))
        # TODO: work out what tqdm needs in order to display the total frame count
        for fidx, img in video:
            # Detect landing gears on a batch of frames
            frames, fidxs, flgs_batch = flg_det.add(img, fidx)
            for frame, frame_idx, flgs in zip(frames, fidxs, flgs_batch):  # for each frame with detections
                if len(flgs) != 0:
                    flg = flgs[0]
                    person_det.add(crop(frame, flg.square(256)), [flg, frame_idx])  # queue for person detection

            if len(flgs_batch) != 0:
                print("Gears detected: ", flgs_batch)

            if len(person_det.imgs) >= person_det.bs:
                count = 0
                r = person_det.flush()  # run person detection
                print("People detected: ", r[2])
                for gear_square, info, people in zip(r[0], r[1], r[2]):  # each image in the batch may contain several people
                    f = frames[count]
                    flg = info[0]
                    fid = info[1]
                    count += 1
                    for pid, person in enumerate(people):
                        dbb(f, flg.square(256).transpose(person), "G")
                        patch = crop(f, flg.square(256).transpose(person).square(128))
                        if patch.shape == (128, 128, 3):
                            cv2.imwrite(
                                osp.join(args.output, vid_name, "{}-{}.png".format(pid, str(fid))), patch
                            )
                            # cv2.imshow("img", crop(gear_square, person))

                    dbb(f, flg, "R")
                    dpoint(f, flg.center(), "R")
                    dbb(f, flg.square(256), "B")
                    #
                    cv2.imshow("img", f)
                    cv2.waitKey(2)
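All of these examples lean on the same batching pattern: `PdxDet.add()` queues an image together with some metadata and returns empty lists until a full batch of `bs` images has accumulated (unless `autoflush=False`), while `flush()` forces inference on whatever is queued. `PdxDet` itself is not shown here; the following is only a sketch of a wrapper with that interface, inferred from how it is called above, with `_infer` standing in for the real PaddleX predictor.

class BatchedDetector:
    """Illustrative stand-in for PdxDet's batching behaviour (assumed, not the original class)."""

    def __init__(self, model_path, bs=8, autoflush=True):
        self.model_path = model_path
        self.bs = bs
        self.autoflush = autoflush
        self.imgs, self.metas = [], []

    def add(self, img, meta):
        # Queue one image; with autoflush, run inference once a full batch is queued.
        self.imgs.append(img)
        self.metas.append(meta)
        if self.autoflush and len(self.imgs) >= self.bs:
            return self.flush()
        return [], [], []

    def flush(self):
        # Run inference on everything queued so far and clear the queue.
        imgs, metas = self.imgs, self.metas
        self.imgs, self.metas = [], []
        dets = [self._infer(im) for im in imgs]  # one list of boxes per image
        return imgs, metas, dets

    def _infer(self, img):
        raise NotImplementedError("placeholder for the PaddleX predictor")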
Example #4
def main():
    for f in sorted(os.listdir(args.img_path)):
        name = f.split(".")[0]
        bbs = xml2bb(osp.join(args.ann_path, name + ".xml"))
        bbs.sort(key=lambda b: (b.wmin, b.hmin))
        img = cv2.imread(osp.join(args.img_path, name + ".png"))
        for idx, bb in enumerate(bbs):
            sq = bb.square(64)  # expand the annotation to a fixed 64x64 square
            if not bb.spill() and not sq.spill() and not sq < (64, 64):
                print(sq)
                patch = crop(img, sq)
                assert patch.shape == (64, 64, 3), patch.shape
                cv2.imwrite(
                    osp.join(args.output, "{}-{}.png".format(name, idx)),
                    patch)
            else:
                print("error", sq)
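Example #4 relies on `xml2bb` from `util.util` to read the annotated boxes. That helper is not shown; as an illustration only, Pascal-VOC-style annotations like these can be parsed with the standard library (the tag names follow the VOC convention, and the plain tuples returned here are not the project's bb objects).

import xml.etree.ElementTree as ET

def parse_voc_boxes(xml_path, label=None):
    # Return (xmin, ymin, xmax, ymax) tuples for each <object>, optionally filtered by class name.
    root = ET.parse(xml_path).getroot()
    boxes = []
    for obj in root.iter("object"):
        if label is not None and obj.findtext("name") != label:
            continue
        b = obj.find("bndbox")
        boxes.append(tuple(int(float(b.findtext(t)))
                           for t in ("xmin", "ymin", "xmax", "ymax")))
    return boxes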
Example #5
def main():
    # 1. Define the model objects
    flg_det = PdxDet(model_path="../model/best/flg_det/", bs=2)
    person_det = PdxDet(model_path="../model/best/person_det_yolov3",
                        bs=2,
                        autoflush=False)
    person_clas = HumanClas(mode="predict")

    for vid_name in os.listdir(args.input):
        # print(vid_name)
        #os.mkdir(osp.join(args.output, vid_name))

        video = Stream(
            osp.join(args.input, vid_name),
            osp.join(args.time,
                     os.listdir(args.time)[0]),
            itv_sparse=3,
            itv_dense=3,
        )
        mem_len = 8
        history = [False for _ in range(mem_len)]
        thresh = 0.5
        # TODO: work out what tqdm needs in order to display the total frame count
        #res_f = open(osp.join("H:/W_S/Graduation_Project/plane/time-out", vid_name.rstrip(".mp4") + ".txt"), "w")
        for fidx, img in video:
            # Detect landing gears on a batch of frames
            frames, fidxs, flgs_batch = flg_det.add(img, fidx)
            for frame, frame_idx, flgs in zip(frames, fidxs,
                                              flgs_batch):  # for each frame with detections
                if len(flgs) != 0:
                    flg = flgs[0]
                    person_det.add(crop(frame, flg.square(256)),
                                   [flg, frame_idx, frame])  # queue for person detection
            # print("Gears detected: ", flgs_batch)
            if len(person_det.imgs) >= person_det.bs:
                r = person_det.flush()  # run person detection
                # print("People detected: ", r[2])
                for gear_square, info, people in zip(
                        r[0], r[1], r[2]):  # each image in the batch may contain several people
                    flg = info[0]
                    fid = info[1]
                    f = info[2]
                    # TODO: run the classifier on the whole batch at once
                    has_positive = False
                    for pid, person in enumerate(people):
                        patch = crop(
                            f,
                            flg.square(256).transpose(person).square(64))
                        res = person_clas.predict(patch)
                        if res:
                            has_positive = True
                        # dbb(f, flg.square(256).transpose(person).region([1.8, 1.8]), "G" if res else "R")
                        dpoint(f,
                               flg.square(256).transpose(person).center(),
                               "G" if res else "R")

                    for idx in range(mem_len - 1, 0, -1):
                        history[idx] = history[idx - 1]
                    history[0] = has_positive
                    prediction = "Positive" if np.sum(
                        history) > mem_len * thresh else "Negative"
                    print(has_positive)
                    print(history, np.sum(history), prediction)
                    #print(fid, has_positive, prediction, np.sum(history), file=res_f)
                    #res_f.flush()
                    dbb(f, flg, "B")
                    dpoint(f, flg.center(), "B")
                    dbb(f, flg.square(256), "B")
                    cv2.imshow("img", f)
                    cv2.waitKey()

                    # cv2.imwrite(
                    #     osp.join(args.output, vid_name, str(fid).zfill(6) + ".png"),
                    #     crop(f, flg.square(300)),
                    # )
        #res_f.close()
        shutil.move(osp.join(args.output, vid_name),
                    osp.join(args.output, "finish"))
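The shift-register `history` in this example is a simple temporal smoothing: keep the last `mem_len` per-frame booleans and report Positive only when more than a `thresh` fraction of them are True. The same rule, isolated as a small helper for illustration (not part of the original code):

from collections import deque

def make_vote(mem_len=8, thresh=0.5):
    # Majority vote over the most recent mem_len per-frame results.
    history = deque(maxlen=mem_len)

    def vote(has_positive):
        history.append(bool(has_positive))
        return "Positive" if sum(history) > mem_len * thresh else "Negative"

    return vote

# Usage inside the frame loop: prediction = vote(has_positive)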
Example #6
import os
import os.path as osp
from tqdm import tqdm

import cv2

from util.util import xml2bb, Stream, crop

vid_dir = "/home/aistudio/plane/视频分类/1920/n/"
bb_dir = "/home/aistudio/plane/视频分类/起落架bb/"
out_dir = "/home/aistudio/plane/视频分类/256-frame/n/"

for name in tqdm(os.listdir(vid_dir)):
    name = name.split(".")[0]
    bb_path = osp.join(bb_dir, name + ".xml")
    if os.path.exists(bb_path):
        print(name)
        bb = xml2bb(bb_path, "gear")[0]
        vid = cv2.VideoCapture(osp.join(vid_dir, name + ".mp4"))
        stream = Stream(osp.join(vid_dir, name + ".mp4"), itv_sparse=2)

        os.mkdir(osp.join(out_dir, name))

        for idx, img in stream:
            out_path = osp.join(out_dir, name,
                                "{}-{}.png".format(name,
                                                   str(idx).zfill(6)))
            img = crop(img, bb.square(256), do_pad=True)
            cv2.imwrite(out_path, img)