def start_videoanalyser():
    print("Starting DeepLabCut")
    model = load_model(
        r"D:\DeepPoseKit-Data-master\datasets\fly\best_model_densenet.h5")
    experiment_enabled = False
    video_output = True

    if experiment_enabled:
        print("Initializing experiment")
        experiment = ExampleExperiment()
        experiment.start_experiment()

    # initialize bookkeeping variables
    all_rows = []
    index = 0

    while video.isOpened():
        ret, frame = video.read()
        if ret:
            # estimate pose and convert the network output to skeletons
            scmap, locref, pose = get_pose(frame, config, sess, inputs, outputs)
            peaks = find_local_peaks_new(scmap, locref, ANIMALS_NUMBER, config)
            skeletons = calculate_skeletons(peaks, ANIMALS_NUMBER)
            if skeletons:
                for skeleton in skeletons:
                    if experiment_enabled:
                        result, response = experiment.check_skeleton(frame, skeleton)
                        plot_triggers_response(frame, response)
                out_frame = plot_bodyparts(frame, skeletons)
            else:
                out_frame = frame

            cv2.imshow('stream', out_frame)
            if video_output:
                video_file.write(out_frame)

            if experiment_enabled:
                all_rows.append(
                    create_row(index, skeletons, experiment_enabled, experiment.get_trial()))
            else:
                all_rows.append(
                    create_row(index, skeletons, experiment_enabled, None))
            index += 1
        else:
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    if experiment_enabled:
        experiment.stop_experiment()

    if video_output:
        print('Saving analyzed video for {}'.format(video_name))
        video_file.release()

    video.release()
    create_dataframes(all_rows)
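start_videoanalyser relies on module-level objects created elsewhere in the script: the capture source (video), the writer (video_file), the output name (video_name) and the DeepLabCut session objects (config, sess, inputs, outputs). A minimal sketch of the video-side setup with plain OpenCV follows; the camera index, codec, frame rate and output name are illustrative assumptions, not values from the original code.

import cv2

# Hypothetical setup for the globals used by start_videoanalyser();
# camera index, codec, fps and file name are assumptions for illustration only.
video = cv2.VideoCapture(0)
video_name = "experiment_stream"
frame_width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
video_file = cv2.VideoWriter(video_name + ".avi", fourcc, 30.0,
                             (frame_width, frame_height))

start_videoanalyser()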
def get_pose_mp(input_q, output_q):
    """
    Process to be used for each camera/DLC stream of analysis
    Designed to be run in an infinite loop
    :param input_q: index and corresponding frame
    :param output_q: index and corresponding analysis
    """
    if MODEL_ORIGIN in ('DLC', 'MADLC'):
        config, sess, inputs, outputs = load_deeplabcut()
        while True:
            if input_q.full():
                index, frame = input_q.get()
                start_time = time.time()
                if MODEL_ORIGIN == 'DLC':
                    scmap, locref, pose = get_pose(frame, config, sess, inputs, outputs)
                    peaks = find_local_peaks_new(scmap, locref, ANIMALS_NUMBER, config)
                    # Use the line below to use raw DLC output rather than the DLStream optimization
                    # peaks = pose
                if MODEL_ORIGIN == 'MADLC':
                    peaks = get_ma_pose(frame, config, sess, inputs, outputs)
                analysis_time = time.time() - start_time
                output_q.put((index, peaks, analysis_time))

    elif MODEL_ORIGIN == 'DLC-LIVE':
        dlc_live = load_dlc_live()
        while True:
            if input_q.full():
                index, frame = input_q.get()
                start_time = time.time()
                if not dlc_live.is_initialized:
                    peaks = dlc_live.init_inference(frame)
                else:
                    peaks = dlc_live.get_pose(frame)
                analysis_time = time.time() - start_time
                output_q.put((index, peaks, analysis_time))

    elif MODEL_ORIGIN == 'DEEPPOSEKIT':
        predict_model = load_dpk()
        while True:
            if input_q.full():
                index, frame = input_q.get()
                start_time = time.time()
                # keep a single channel and add a batch dimension for the DeepPoseKit model
                frame = frame[..., 1][..., None]
                st_frame = np.stack([frame])
                prediction = predict_model.predict(st_frame, batch_size=1, verbose=True)
                peaks = prediction[0, :, :2]
                analysis_time = time.time() - start_time
                output_q.put((index, peaks, analysis_time))

    elif MODEL_ORIGIN == 'SLEAP':
        sleap_model = load_sleap()
        while True:
            if input_q.full():
                index, frame = input_q.get()
                start_time = time.time()
                # this is weird, but without it, it does not seem to work...
                input_frame = frame[:, :, ::-1]
                frames = np.array([input_frame])
                prediction = sleap_model.predict(frames[[0]], batch_size=1)
                # check whether this is a multi-animal or a single-animal model
                if sleap_model.name == 'single_instance_inference_model':
                    # wrap the prediction again so the output shape matches both model types
                    peaks = np.array([prediction['peaks'][0, :]])
                else:
                    peaks = prediction['instance_peaks'][0, :]
                analysis_time = time.time() - start_time
                output_q.put((index, peaks, analysis_time))
    else:
        raise ValueError(f'Model origin {MODEL_ORIGIN} not available.')
def get_pose_mp(input_q, output_q):
    """
    Process to be used for each camera/DLC stream of analysis
    Designed to be run in an infinite loop
    :param input_q: index and corresponding frame
    :param output_q: index and corresponding analysis
    """
    if MODEL_ORIGIN in ("DLC", "MADLC"):
        config, sess, inputs, outputs = load_deeplabcut()
        while True:
            if input_q.full():
                index, frame = input_q.get()
                start_time = time.time()
                if MODEL_ORIGIN == "DLC":
                    scmap, locref, pose = get_pose(frame, config, sess, inputs, outputs)
                    if USE_DLSTREAM_POSTURE_DETECTION:
                        # legacy peak detection used in earlier DLStream versions
                        peaks = find_local_peaks_new(scmap, locref, ANIMALS_NUMBER, config)
                    else:
                        # use raw DLC output rather than the DLStream optimization
                        peaks = pose
                if MODEL_ORIGIN == "MADLC":
                    peaks = get_ma_pose(frame, config, sess, inputs, outputs)
                analysis_time = time.time() - start_time
                output_q.put((index, peaks, analysis_time))

    elif MODEL_ORIGIN == "DLC-LIVE":
        dlc_live = load_dlc_live()
        while True:
            if input_q.full():
                index, frame = input_q.get()
                start_time = time.time()
                if not dlc_live.is_initialized:
                    peaks = dlc_live.init_inference(frame)
                else:
                    peaks = dlc_live.get_pose(frame)
                analysis_time = time.time() - start_time
                output_q.put((index, peaks, analysis_time))

    elif MODEL_ORIGIN == "DEEPPOSEKIT":
        predict_model = load_dpk()
        while True:
            if input_q.full():
                index, frame = input_q.get()
                start_time = time.time()
                # keep a single channel and add a batch dimension for the DeepPoseKit model
                frame = frame[..., 1][..., None]
                st_frame = np.stack([frame])
                prediction = predict_model.predict(st_frame, batch_size=1, verbose=True)
                peaks = prediction[0, :, :2]
                analysis_time = time.time() - start_time
                output_q.put((index, peaks, analysis_time))

    elif MODEL_ORIGIN == "SLEAP":
        sleap_model = load_sleap()
        while True:
            if input_q.full():
                index, frame = input_q.get()
                start_time = time.time()
                # make sure the image is (1, height, width, channels) and uint8
                # (height, width) -> (height, width, 1)
                frame = np.expand_dims(frame, axis=-1) if frame.ndim == 2 else frame
                # (height, width, channels) -> (1, height, width, channels)
                frame = np.expand_dims(frame, axis=0) if frame.ndim == 3 else frame
                # predict_on_batch is much faster as it does not retrace the model graph for same-size inputs
                pred = sleap_model.predict_on_batch(frame)
                try:
                    peaks = pred["instance_peaks"][0]  # (n_poses, n_nodes, 2)
                except KeyError:
                    # necessary for old SLEAP versions where single_instance models use a different key
                    peaks = pred["peaks"]
                analysis_time = time.time() - start_time
                output_q.put((index, peaks, analysis_time))
    else:
        raise ValueError(f"Model origin {MODEL_ORIGIN} not available.")
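get_pose_mp is written to run as a worker process: it spins until input_q holds a frame, analyses it, and puts (index, peaks, analysis_time) on output_q. A minimal usage sketch with Python's multiprocessing module follows, assuming single-slot queues (so input_q.full() means exactly one frame is waiting) and an OpenCV camera as the frame source; the feeding loop is illustrative and not the original DLStream pipeline.

import multiprocessing as mp

import cv2

if __name__ == "__main__":
    # single-slot queues: input_q.full() is True exactly when one frame is waiting
    input_q = mp.Queue(maxsize=1)
    output_q = mp.Queue(maxsize=1)

    worker = mp.Process(target=get_pose_mp, args=(input_q, output_q), daemon=True)
    worker.start()

    cap = cv2.VideoCapture(0)  # illustrative frame source
    index = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        if input_q.empty():
            input_q.put((index, frame))
            index += 1
        if not output_q.empty():
            frame_index, peaks, analysis_time = output_q.get()
            print(f"frame {frame_index}: pose estimated in {analysis_time:.3f} s")

    worker.terminate()
    cap.release()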