# Example 1
# Run Mask R-CNN on every frame of a clip and write the masked result video.
# Relies on module-level `model`, `display_instances`, and `class_names`.
# BUGFIX: the input path used plain backslashes in a normal string
# ('D:\Semantic...'); it only worked because \S, \M, \c happen not to be
# recognized escapes. Use a raw string so the path is unambiguous.
capture = cv2.VideoCapture(
    r'D:\SemanticSegmentation\Mask_RCNN\chicken_beautiful.mp4')
size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))

codec = cv2.VideoWriter_fourcc(*'DIVX')
output = cv2.VideoWriter(r'C:\Users\surface\Desktop\videofile_masked.avi',
                         codec, 25.0, size)

while capture.isOpened():
    ret, frame = capture.read()
    if not ret:  # end of stream (or read failure)
        break
    print("yes")
    # Detect on the single frame and overlay instance masks onto it.
    results = model.detect([frame], verbose=0)
    r = results[0]
    frame = display_instances(frame, r['rois'], r['masks'], r['class_ids'],
                              class_names, r['scores'])
    output.write(frame)
    print("finish")
    # cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

capture.release()
output.release()
cv2.destroyAllWindows()
# Example 2
# Batched variant: buffer 32 frames and run model.detect on the whole batch.
# BUGFIX: removed the stray closing '"""' that the paste left on the first
# line — it broke the snippet's syntax.
# NOTE(review): `startTime1` is not defined in this fragment — presumably set
# before this snippet begins; confirm against the original source.
print(datetime.now() - startTime1)
images = []
while capture.isOpened():
    ret, frame = capture.read()
    if ret:
        images.append(frame)
        # Once a full batch of 32 frames is buffered, detect on all of them.
        if len(images) == 32:
            startTime = datetime.now()
            results = model.detect(images, verbose=1)
            print("il a fallu ", (datetime.now() - startTime),
                  "  secondes soit ", (datetime.now() - startTime) / 32,
                  " par image")
            # Pair each buffered frame with its detection result.
            # (The original used enumerate() with an unused index.)
            for image, r in zip(images, results):
                frame = display_instances(image, r['rois'], r['masks'],
                                          r['class_ids'], class_names, None)
                output.write(frame)
                #cv2.imshow('frame', frame)
            images = []

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

capture.release()
output.release()
cv2.destroyAllWindows()
print(datetime.now() - startTime1)
# Example 3 (its header was lost in the paste): detect on alternating frames.
# `masking` is a frame-skip counter: frames where masking == 0 go through the
# model; the in-between frames are written to the output unmodified.
while (1):  #capture.isOpened()
    ret, frame = capture.read()  # ret: whether a frame was received; read each frame

    if ret and masking == 0:
        results = model.detect(
            [frame],
            verbose=0)  # run the model: forward compute returns detection results
        r = results[0]

        #  print("visualize_cv2 LINE 131 :", r)
        #  print("class names :", class_names)
        #{'rois': array([[1061, 11, 1280,  201],

        masking = masking + 1
        frame = display_instances(frame, r['rois'], r['masks'], r['class_ids'],
                                  class_names, r['scores'], Video_w,
                                  Video_w_20, Video_w_80, Video_h_35)
        # (runs each time display_instances is invoked)
        output.write(frame)
        cv2.imshow('frame', frame)  # show the original video with the mask overlaid

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    elif ret and masking > 0:
        masking = masking + 1
        if masking == 2:  # how many frames between model computes
            masking = 0
        if cv2.waitKey(1) & 0xFF == ord(
                'q'):  # waitkey & 0xFF = 1111 1111 == 'q'
            break
        output.write(frame)  # write the frame directly, skipping the model forward compute
# Example 4
def process_clip(video):
    """Run Mask R-CNN over the road region of each frame of *video*.

    Writes a masked output clip ('<video>_masked.avi') and a JSON file
    ('<video>.json') mapping frame number -> {object_label: [y1, x1, y2, x2]}.

    Relies on module-level `model`, `display_instances`, and `class_names`.
    """
    ## Probe the clip for duration / frame count.
    # (Renamed `v` -> `probe`: the original reused `v` for the dict loop
    # below, shadowing the capture handle.)
    probe = cv2.VideoCapture(video)
    probe.set(cv2.CAP_PROP_POS_AVI_RATIO, 1)  # seek to end so POS_MSEC = duration
    duration = probe.get(cv2.CAP_PROP_POS_MSEC)

    frameCount = int(probe.get(cv2.CAP_PROP_FRAME_COUNT))
    print('duration = {}'.format(duration))
    print('number of frames = {}'.format(frameCount))

    # the 1st frame is frame 0, not 1, so "5335" means after the last frame
    POS_FRAMES = probe.get(cv2.CAP_PROP_POS_FRAMES)
    FRAME_COUNT = probe.get(cv2.CAP_PROP_FRAME_COUNT)
    print('POS_FRAMES = ' + str(POS_FRAMES))
    print('FRAME_COUNT = ' + str(FRAME_COUNT))

    probe.release()

    ## Output video frame time, frame content, and bounding boxes to dictionary
    capture = cv2.VideoCapture(video)
    # NOTE(review): `size` is computed but unused — the writer below is fixed
    # at (640, 360), the cropped region's size. Kept for parity with the
    # original; confirm whether full-frame output was ever intended.
    size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    codec = cv2.VideoWriter_fourcc(*'DIVX')
    fps = 30
    output = cv2.VideoWriter('{}_masked.avi'.format(video), codec, fps,
                             (640, 360))

    count = 1
    out_dict = {}

    while capture.isOpened():
        # capture.read() returns (ok, frame); ok is False at end of video.
        ret, frame = capture.read()
        if not ret:
            break
        # Top 360 rows, right half — presumably the road camera view
        # (TODO confirm crop against the source footage).
        road_frame = frame[0:360, 640:1280]
        results = model.detect([road_frame], verbose=0)
        r = results[0]
        road_frame = display_instances(road_frame, r['rois'], r['masks'],
                                       r['class_ids'], class_names,
                                       r['scores'])
        output.write(road_frame)

        # Record per-frame class ids and boxes for the JSON dump below.
        out_dict[count] = {'cls_id': r['class_ids'], 'rois': r['rois']}

        print('[INFO] Frame {}/{}'.format(count, frameCount))
        count += 1

    capture.release()
    output.release()

    ## Put the video data into a JSON file
    frame2content = {}
    for frame_no, det in out_dict.items():
        objs = [class_names[i] for i in det["cls_id"]]
        # Suffix duplicate labels with a per-class countdown so dict keys
        # stay unique (e.g. car_2, car_1).
        obj2num = dict(collections.Counter(objs))
        _objs = []
        for o in objs:
            _objs.append(o + "_" + str(obj2num[o]))
            obj2num[o] -= 1

        obj2roi = {}
        for idx, o in enumerate(_objs):
            # Cast numpy ints to plain int so json.dump accepts them.
            obj2roi[o] = list(map(int, det["rois"][idx, :]))
        frame2content[frame_no] = obj2roi

    # `with` guarantees the file is closed even if json.dump raises
    # (the original used bare open()/close()).
    with open('{}.json'.format(video), 'w') as json_log:
        json.dump(frame2content, json_log)
# Example 5
                    else:                   
                        # Second-chance match: if this box tracks the previous-
                        # previous detection within `threshold`, adopt it as the
                        # new "previous"; otherwise flag row i for removal.
                        if abs(h-h_prev2) < threshold and abs(w-w_prev2) < threshold:
                            h_prev, w_prev, i_prev = h_prev2, w_prev2, i_prev2
                        else:
                            remove.append(i)
        # Delete flagged rows in reverse index order so the remaining indices
        # stay valid as rows are removed.
        # BUGFIX: the original reused `r` as the loop variable, shadowing the
        # results dict — `r['class_ids']` then indexed an int and raised
        # TypeError on the first iteration.
        # NOTE(review): masks are usually (H, W, N) in Mask R-CNN, so axis=0
        # here would delete image rows, not instances — confirm intended axis.
        for row in reversed(remove):
            print ("Deleted row ", row)
            r['class_ids'] = np.delete(r['class_ids'], row, axis=0)
            r['rois'] = np.delete(r['rois'], row, axis=0)
            r['masks'] = np.delete(r['masks'], row, axis=0)
            r['scores'] = np.delete(r['scores'], row, axis=0)
        
        if h_prev2 is not None:
            # Also drop the retained duplicate candidate at i_prev2.
            # NOTE(review): as above, axis=0 on r['masks'] deletes image rows
            # if masks are (H, W, N) — confirm the intended axis.
            print ("Deleted row ", i_prev2)
            r['class_ids'] = np.delete(r['class_ids'], (i_prev2), axis=0)
            r['rois'] = np.delete(r['rois'], (i_prev2), axis=0)
            r['masks'] = np.delete(r['masks'], (i_prev2), axis=0)
            r['scores'] = np.delete(r['scores'], (i_prev2), axis=0)
                        
        # Draw the surviving detections; `save`/`writer` optionally persist them.
        visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'], 
                                    class_names, r['scores'], save=args.save_demo, writer=out, dtype=t)
        if args.image is not None and not args.save_demo or args.image_dir is not None:
            # Block for a keypress when showing still images.
            c = cv2.waitKey()
            if args.image_dir is not None:
                if c==81:
                    # Keycode 81 is presumably the left arrow: step back one
                    # image (outer loop will advance by +1) — verify on target OS.
                    ind-=2
    if args.video is not None:
        cap.release()
        out.release()
    cv2.destroyAllWindows()
# Example 6
        # Fragment (mid-function): compare powerbank detections between the
        # original and a 90-degree-rotated view, padding the original's result
        # arrays with placeholders so both views report the same count.
        actual_powerbanks = 0 if len(actual_powerbanks)==0 else actual_powerbanks[0] 
        
        # Count powerbanks detected in the rotated image (class ids in cls2).
        unique, counts = np.unique(cls2, return_counts=True)
        rotated_powerbanks = counts[unique==powerbank_index]
        rotated_powerbanks = 0 if len(rotated_powerbanks)==0 else rotated_powerbanks[0] 
        
        if actual_powerbanks < rotated_powerbanks:
            for _ in range(rotated_powerbanks - actual_powerbanks):
                cls1 = np.append(cls1,powerbank_index)
                # NOTE(review): appending the *string* '0.99' to what is
                # presumably a float score array coerces the whole array to a
                # string dtype — confirm downstream consumers tolerate this.
                scores1 = np.append(scores1, '0.99') #placeholder values
                bboxes1.append([0,0,5,5]) #placeholder values to be changed for visualization if necessary
        image = imutils.rotate_bound(image, 90)
        # NOTE(review): the stray triple-quote below looks like paste residue;
        # as written it turns the remainder of this file into a string literal.
        '''
        visualize.display_instances(image,
                                    np.array(bboxes),
                                    None,
                                    classes,
                                    class_names,
                                    scores,
                                    save=args.save_demo,
                                    writer=out,
                                    ind=jnd)

        if args.image is not None and not args.save_demo or args.image_dir is not None:
            cv2.waitKey()

    if args.video is not None:
        cap.release()
        out.release()
    cv2.destroyAllWindows()