def button2Function(self):
    """Run live Mask R-CNN detection on the default webcam.

    Shows each captured frame in an OpenCV window until the user presses
    'q'.  Pressing 's' creates a VideoWriter for 'mycam.avi' (the original
    code never writes frames to it — see the TODO below).
    """
    capture = cv2.VideoCapture(0)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    self.lb_3.setText("*****진행중*****")  # status label: "in progress"
    self.lb_3.repaint()
    while True:
        ret, frame = capture.read()
        if not ret:
            # BUG FIX: the original ignored the read status; a camera
            # failure would pass frame=None into the model. Stop cleanly.
            break
        results = model.detect([frame], verbose=0)
        r = results[0]
        # frame = display_instances(frame, r['rois'], r['masks'],
        #                           r['class_ids'], class_names, r['scores'])
        cv2.imshow('frame', frame)
        # BUG FIX: the original called cv2.waitKey(0) (blocking forever on
        # every frame) and then cv2.waitKey(1) a second time, so the 'q'
        # and 's' tests examined *different* key events.  Poll exactly
        # once per frame and test the same value.
        k = cv2.waitKey(1) & 0xFF
        if k == ord('q'):
            break
        elif k == ord('s'):
            # NOTE(review): 'codec' and 'fc' are presumed module-level
            # globals — confirm.  The original referenced the undefined
            # name 'cap'; fixed to 'capture'.  The writer is created but
            # never written to — TODO: record frames if that was intended.
            cv2.VideoWriter('mycam.avi', codec, fc,
                            (int(capture.get(3)), int(capture.get(4))))
    # BUG FIX: the release/destroy/label-update sequence was duplicated
    # verbatim in the original; run the cleanup exactly once.
    capture.release()
    cv2.destroyAllWindows()
    self.lb_4.setText("완료")  # status label: "done"
    self.lb_4.repaint()
def button1Function(self):
    """Blur detected class-44 regions in 'kmm.mov' frame by frame.

    Writes the processed video to '1.mp4' at 60 fps and previews each
    frame in an OpenCV window; press 'q' to abort early.
    """
    capture = cv2.VideoCapture("kmm.mov")
    size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    codec = cv2.VideoWriter_fourcc(*'DIVX')
    output = cv2.VideoWriter('1.mp4', codec, 60.0, size)
    print("btn_1 Clicked!!!!!!!!!!222")
    self.lb_3.setText("*****진행중*****")  # status label: "in progress"
    self.lb_3.repaint()
    print("btn_1 Clicked!!!!!!!!!!333")
    while (capture.isOpened()):
        print("btn_1 Clicked!!!!!!!!!!444444")
        ret, frame = capture.read()
        if ret:
            print("btn_1 Clicked!!!!!!!!55555555")
            # Run the model and keep only masks whose class id is 44.
            # NOTE(review): presumably the class to anonymize — confirm
            # against the project's class_names list.
            results = model.detect([frame], verbose=1)
            r = results[0]
            masks = r['masks'][:, :, r['class_ids'] == 44]
            # BUG FIX: np.bool was removed in NumPy 1.24; the builtin
            # bool is the documented replacement.
            mask = np.sum(masks, axis=2).astype(bool)
            mask_3d = np.repeat(np.expand_dims(mask, axis=2), 3,
                                axis=2).astype(np.uint8)
            blurred_img = cv2.blur(frame, (101, 101))  # heavy box blur
            # Soften the mask edges so the blur boundary is not jagged.
            mask_3d_blurred = cv2.medianBlur(mask_3d, 9)
            # Blurred pixels inside the mask, original pixels outside
            # (background untouched, subject blurred).
            person_mask = mask_3d_blurred * blurred_img.astype(np.float32)
            bg_mask = (1 - mask_3d_blurred) * frame.astype(np.float32)
            out = (person_mask + bg_mask).astype(np.uint8)
            output.write(out)
            # NOTE(review): 'scaler' is presumed to be a module-level
            # constant controlling the preview size — confirm.
            out = cv2.resize(
                out,
                (int(out.shape[1] * scaler), int(out.shape[0] * scaler)))
            cv2.imshow('out', out)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                print("btn_1 Clicked!!!!!!!!!!666666")
                break
        else:
            break
    capture.release()
    output.release()
    cv2.destroyAllWindows()
    self.lb_4.setText("완료")  # status label: "done"
    self.lb_4.repaint()
# Offline batch job: run Mask R-CNN on every frame of the source clip and
# write the annotated frames to a new video file at 25 fps.
#
# BUG FIX: the input path was a plain string containing '\S', '\M' and '\c'
# — invalid escape sequences (SyntaxWarning since Python 3.12, scheduled to
# become an error).  A raw string keeps the runtime value byte-identical.
capture = cv2.VideoCapture(
    r'D:\SemanticSegmentation\Mask_RCNN\chicken_beautiful.mp4')
size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
codec = cv2.VideoWriter_fourcc(*'DIVX')
output = cv2.VideoWriter(r'C:\Users\surface\Desktop\videofile_masked.avi',
                         codec, 25.0, size)
while (capture.isOpened()):
    ret, frame = capture.read()
    if ret:
        # add mask to frame
        print("yes")
        results = model.detect([frame], verbose=0)
        r = results[0]
        frame = display_instances(frame, r['rois'], r['masks'],
                                  r['class_ids'], class_names, r['scores'])
        output.write(frame)
        print("finish")
        # cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        # End of stream: capture.read() returned no frame.
        break
capture.release()
output.release()
cv2.destroyAllWindows()
# NOTE(review): this chunk is an incomplete fragment — the leading
# "else: break" belongs to an if/while outside this view, and the stray
# '"""' closes a triple-quoted block opened earlier, so it cannot be
# reformatted without guessing at invisible structure.  Kept byte-identical.
# What the live portion does: buffers frames into batches of 32, runs
# model.detect on the whole batch while timing it (the French print reports
# total and per-image seconds), draws each result with display_instances
# (scores=None), writes the frames to `output`, then resets the batch.
else: break capture.release() output.release() cv2.destroyAllWindows() print(datetime.now() - startTime1)""" images = [] while (capture.isOpened()): ret, frame = capture.read() if ret: # add mask to frame images.append(frame) if len(images) == 32: startTime = datetime.now() results = model.detect(images, verbose=1) print("il a fallu ", (datetime.now() - startTime), " secondes soit ", (datetime.now() - startTime) / 32, " par image") for i, item in enumerate(zip(images, results)): image = item[0] r = item[1] frame = display_instances(image, r['rois'], r['masks'], r['class_ids'], class_names, None) output.write(frame) #cv2.imshow('frame', frame) images = [] if cv2.waitKey(1) & 0xFF == ord('q'): break else:
# Fragment of a road-masking pipeline.  NOTE(review): 'codec', 'size',
# 'capture' and the Video_w / Video_w_20 / Video_w_80 / Video_h_35 geometry
# constants are defined outside this view — confirm.  The loop header is
# commented out, so the body below processes a single frame only.
output = cv2.VideoWriter('videofile_masked_road_20%35%_2x_50_inc.avi',
                         codec, 30.0, size)
flag = 0
masking = 0
# To improve encoding (boxing) throughput, object detection (boxing) is run
# on every other frame -> speed-up.
print("Start masking")
now = datetime.now()
print("Start at :", now)
start = round(time.time())
# while (1):  #capture.isOpened()
ret, frame = capture.read()  # ret: whether a frame was received; frame: the image
if ret and masking == 0:
    # Use the model -> a forward compute returns the detection results.
    results = model.detect(
        [frame], verbose=0)
    r = results[0]
    # print("visualize_cv2 LINE 131 :", r)
    # print("class names :", class_names)  #{'rois': array([[1061, 11, 1280, 201],
    masking = masking + 1
    frame = display_instances(frame, r['rois'], r['masks'], r['class_ids'],
                              class_names, r['scores'], Video_w, Video_w_20,
                              Video_w_80, Video_h_35)  # on every call to display_instances
    output.write(frame)
    cv2.imshow('frame', frame)  # show the original video with the mask overlaid
def process_clip(video):
    """Run Mask R-CNN over the road region of *video*.

    For each frame, the 640x360 crop at rows 0:360 / cols 640:1280
    (presumably the road area of a 1280x720 clip — confirm against the
    camera layout) is run through the model, annotated with
    display_instances, and written to '<video>_masked.avi'.  Per-frame
    class ids and bounding boxes are collected and dumped to
    '<video>.json'.
    """
    # --- Report clip duration / frame count (diagnostics only) ---
    v = cv2.VideoCapture(video)
    v.set(cv2.CAP_PROP_POS_AVI_RATIO, 1)       # seek to the end of the clip
    duration = v.get(cv2.CAP_PROP_POS_MSEC)    # timestamp at the end = duration (ms)
    frameCount = int(v.get(cv2.CAP_PROP_FRAME_COUNT))
    print('duration = {}'.format(duration))
    print('number of frames = {}'.format(frameCount))
    # the 1st frame is frame 0, not 1, so "5335" means after the last frame
    POS_FRAMES = v.get(cv2.CAP_PROP_POS_FRAMES)
    FRAME_COUNT = v.get(cv2.CAP_PROP_FRAME_COUNT)
    print('POS_FRAMES = ' + str(POS_FRAMES))
    print('FRAME_COUNT = ' + str(FRAME_COUNT))
    v.release()

    # --- Detect, annotate, and record per-frame results ---
    capture = cv2.VideoCapture(video)
    size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    codec = cv2.VideoWriter_fourcc(*'DIVX')
    fps = 30
    output = cv2.VideoWriter('{}_masked.avi'.format(video), codec, fps,
                             (640, 360))
    count = 1
    out_dict = {}
    while (capture.isOpened()):
        # capture.read() returns (ok, frame); ok is False once the video
        # is exhausted, which is how we detect the end of the clip.
        ret, frame = capture.read()
        if ret:
            road_frame = frame[0:360, 640:1280]
            results = model.detect([road_frame], verbose=0)
            r = results[0]
            road_frame = display_instances(road_frame, r['rois'], r['masks'],
                                           r['class_ids'], class_names,
                                           r['scores'])
            output.write(road_frame)
            out_dict[count] = {}
            out_dict[count]['cls_id'] = r['class_ids']
            out_dict[count]['rois'] = r['rois']
            print('[INFO] Frame {}/{}'.format(count, frameCount))
            count += 1
        else:
            break
    capture.release()
    output.release()

    # --- Put the video data into a JSON file ---
    frame2content = {}
    # BUG FIX: the loop variable was named 'v', shadowing the VideoCapture
    # above; renamed to 'data' for clarity.
    for k, data in out_dict.items():
        objs = [class_names[i] for i in data["cls_id"]]
        obj2num = dict(collections.Counter(objs))
        # Give duplicate classes unique suffixed names ("car_2", "car_1", ...)
        # by counting down the per-class total.
        _objs = []
        for o in objs:
            _objs.append(o + "_" + str(obj2num[o]))
            obj2num[o] -= 1
        # Map each suffixed name to its bounding box as plain ints so the
        # structure is JSON-serializable (numpy ints are not).
        obj2roi = {}
        for idx, o in enumerate(_objs):
            obj2roi[o] = list(map(int, list(data["rois"][idx, :])))
        frame2content[k] = obj2roi
    # BUG FIX: the JSON file was opened without a context manager and would
    # leak the handle on an exception; 'with' guarantees it is closed.
    with open('{}.json'.format(video), 'w') as json_log:
        json.dump(frame2content, json_log)
import numpy as np
from visualize_cv2 import model, display_instances, class_names

# Annotate every frame of 'videofile.mp4' with Mask R-CNN detections and
# save the result as 'videofile_masked.avi' at 60 fps.  A live preview is
# shown while processing; press 'q' to stop early.
capture = cv2.VideoCapture('videofile.mp4')
frame_size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
              int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
codec = cv2.VideoWriter_fourcc(*'DIVX')
output = cv2.VideoWriter('videofile_masked.avi', codec, 60.0, frame_size)

while capture.isOpened():
    ret, frame = capture.read()
    if not ret:
        # No more frames (end of file or read failure): leave the loop.
        break
    # One forward pass per frame; detect() returns one result per image.
    r = model.detect([frame], verbose=0)[0]
    frame = display_instances(frame, r['rois'], r['masks'], r['class_ids'],
                              class_names, r['scores'])
    output.write(frame)          # append the annotated frame to the output
    cv2.imshow('frame', frame)   # live preview window
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

capture.release()
output.release()
cv2.destroyAllWindows()