def label(video, log):
    """Debug routine for inspecting detected contours in a single frame.

    Steps through the clip and, at one frame of interest, draws the
    bounding box of every detected contour on the original frame so it
    can be inspected by hand.

    Args:
      video: mp4 video, opened as a cv2.VideoCapture
      log: log of tracked waves (currently unused)
    """
    # Initialize frame counters.
    frame_num = 1
    num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

    # Initiate a timer for program performance:
    time_start = time.time()

    # The main loop is here:
    while True:

        # Print progress and read frames until end of clip.
        print(frame_num)
        successful_read, original_frame = video.read()
        if not successful_read:
            break

        # Preprocess frames.
        analysis_frame = mwt_preprocessing.preprocess(original_frame)

        # Inspect contours only in the frame of interest (debug value).
        if frame_num == 63:
            contours, _ = cv2.findContours(image=analysis_frame,
                                           mode=cv2.RETR_EXTERNAL,
                                           method=cv2.CHAIN_APPROX_NONE)

            for contour in contours:
                # Centroid of the contour, useful for singling out one wave.
                centroid = mwt_objects._get_centroid(contour)
                print(centroid)

                # Bounding rect in the coordinates of the downscaled
                # analysis frame.
                sm_bound_rect = cv2.boundingRect(contour)
                print(sm_bound_rect)

                # Scale the rect back up to the original frame's coordinates.
                resize_factor = 1 / mwt_preprocessing.RESIZE_FACTOR
                bound_rect = [int(value * resize_factor)
                              for value in sm_bound_rect]
                print(bound_rect)

                # Draw the scaled box on a copy of the original frame and
                # display it until a key is pressed.
                color = (0, 0, 255)
                display_frame = copy.deepcopy(original_frame)
                cv2.rectangle(display_frame,
                              (bound_rect[0], bound_rect[1]),
                              (bound_rect[0] + bound_rect[2],
                               bound_rect[1] + bound_rect[3]),
                              color,
                              2)
                cv2.imshow('Wave', display_frame)
                cv2.waitKey(0)
                cv2.destroyAllWindows()

        frame_num += 1
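
# A self-contained sketch of the findContours / boundingRect flow used in
# label() above, run on a tiny synthetic binary image instead of a video
# frame.  The function name and the array contents are illustrative only and
# are not part of this module's API; it assumes OpenCV 4, where findContours
# returns (contours, hierarchy).
def _bounding_rect_demo():
    """Show how a contour's bounding rect is obtained and scaled up."""
    import numpy as np

    # One white rectangular blob on a black 8x8 "analysis frame".
    blob = np.zeros((8, 8), dtype=np.uint8)
    blob[2:5, 3:7] = 255

    contours, _ = cv2.findContours(blob,
                                   cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)
    x, y, w, h = cv2.boundingRect(contours[0])

    # Scale the rect up by 4x, as label() does when mapping boxes from the
    # downscaled analysis frame back onto the original frame.
    scaled = tuple(value * 4 for value in (x, y, w, h))
    print((x, y, w, h), "->", scaled)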
def analyze(video, write_output=True, label=False, rate=False, model=None):
    """Main routine for analyzing nearshore wave videos.

    Overlays detected waves onto original frames and writes them to a
    new video.  Returns a log of detected wave attributes, frame by
    frame.

    Args:
      video: mp4 video, opened as a cv2.VideoCapture
      write_output: boolean indicating whether a video with the
                    tracking overlay is to be written out
      label: if True, ask the user to hand-rate the frames of each wave
      rate: if True, use the model to generate a rating for the frames
      model: the rating model, required when rate is True

    Returns:
      recognized_waves: list of recognized wave objects
      wave_log: list of tuples of wave attributes for csv output
      performance: program performance in frames per second
    """
    # Initiate an empty list of tracked waves, ultimately recognized
    # waves, and a log of all tracked waves in each frame.
    tracked_waves = []
    recognized_waves = []
    wave_log = []
    ratings = []

    # Initialize frame counters.
    frame_num = 1
    num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = int(video.get(cv2.CAP_PROP_FPS))

    # If an output video is to be made:
    if write_output:
        out = mwt_io.create_video_writer(video)

    # Initiate a timer for program performance:
    time_start = time.time()

    # The main loop is here:
    while True:

        # Write status update to stdout.
        status_update(frame_num, num_frames)

        # Read frames until end of clip.
        successful_read, original_frame = video.read()
        if not successful_read:
            break

        # Preprocess frames.
        analysis_frame = mwt_preprocessing.preprocess(original_frame)

        # Detect all sections.
        sections = mwt_detection.detect_sections(analysis_frame,
                                                 frame_num,
                                                 original_frame)

        # Track all waves in tracked_waves.
        mwt_tracking.track(tracked_waves,
                           analysis_frame,
                           frame_num,
                           num_frames,
                           original_frame)

        # Write tracked wave stats to wave_log.
        for wave in tracked_waves:
            wave_log.append((frame_num, wave.name, wave.mass, wave.max_mass,
                             wave.displacement, wave.max_displacement,
                             wave.birth, wave.death, wave.recognized,
                             wave.centroid))

        # Collect dead waves that were recognized.
        dead_recognized_waves = [wave for wave in tracked_waves
                                 if wave.death is not None
                                 and wave.recognized is True]
        recognized_waves.extend(dead_recognized_waves)

        # Label the dead waves, if the label flag was specified.
        if label:
            mwt_label.label(dead_recognized_waves, fps, dead=True)

        # Rate the dead waves, if the rate flag was specified.
        if rate:
            mwt_rate.rate(ratings, dead_recognized_waves, model)

        # Remove dead waves from tracked_waves.
        tracked_waves = [wave for wave in tracked_waves if wave.death is None]

        # Remove duplicate waves, keeping the earliest wave.
        tracked_waves.sort(key=lambda x: x.birth, reverse=True)
        for wave in tracked_waves:
            other_waves = [wav for wav in tracked_waves if not wav == wave]
            if mwt_tracking.will_be_merged(wave, other_waves):
                wave.death = frame_num
        tracked_waves = [wave for wave in tracked_waves if wave.death is None]
        tracked_waves.sort(key=lambda x: x.birth, reverse=False)

        # Check sections for any new potential waves and add them to
        # tracked_waves.
        for section in sections:
            if not mwt_tracking.will_be_merged(section, tracked_waves):
                tracked_waves.append(section)

        # Label all current waves if the label flag was specified.
        if label:
            mwt_label.label(tracked_waves, fps)

        # Rate all current waves if the rate flag was specified.
        if rate:
            mwt_rate.rate(ratings, tracked_waves, model)

        if write_output:
            # Draw detection boxes on the original frame for visualization,
            # scaling coordinates back up from the analysis resolution.
            original_frame = mwt_io.draw(tracked_waves,
                                         original_frame,
                                         1 / mwt_preprocessing.RESIZE_FACTOR)

            # Write frame to output video.
            out.write(original_frame)
            # To inspect the preprocessed frames instead, write
            # analysis_frame here rather than original_frame.

        # Increment the frame count.
        frame_num += 1

    # Stop timer here and calc performance.
    time_elapsed = time.time() - time_start
    performance = num_frames / time_elapsed

    if rate:
        final_rating = mwt_rate.get_final_rating(ratings)
        print("Final rating for this video: {}".format(final_rating))

    # Provide update to user here.
    if recognized_waves:
        print("{} wave(s) recognized.".format(len(recognized_waves)))
        print("Program performance: %0.1f frames per second." % performance)
        for i, wave in enumerate(recognized_waves):
            print("Wave #{}: ID: {}, Birth: {}, Death: {}, "
                  "Max Displacement: {}, Max Mass: {}".format(
                      i + 1,
                      wave.name,
                      wave.birth,
                      wave.death,
                      wave.max_displacement,
                      wave.max_mass))
    else:
        print("No waves recognized.")

    # Clean up resources.
    if write_output:
        out.release()

    return recognized_waves, wave_log, performance
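
# A minimal usage sketch for analyze(), kept under a __main__ guard so it
# only runs when this module is executed directly.  The clip path
# "nearshore_clip.mp4" and the CSV file name are illustrative, not part of
# the project; swap in a real file before running.
if __name__ == "__main__":
    import csv

    # Open the clip and run the full pipeline without hand-labeling or
    # model-based rating.
    capture = cv2.VideoCapture("nearshore_clip.mp4")
    waves, log, frames_per_second = analyze(capture, write_output=True)
    capture.release()

    # Persist the per-frame wave attributes for later inspection.
    with open("wave_log.csv", "w", newline="") as csvfile:
        csv.writer(csvfile).writerows(log)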