Example #1
# Imports assumed from the surrounding DeepLabCut module (not shown in these excerpts):
import os
import numpy as np
import cv2
from tqdm import tqdm
from skimage.util import img_as_ubyte
from deeplabcut.pose_estimation_tensorflow.nnet import predict

def GetPoseDynamic(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes, detectiontreshold, margin):
    ''' Non-batchwise pose estimation for video cap, dynamically cropping around previously detected body parts.'''
    if cfg['cropping']:
        ny,nx=checkcropping(cfg,cap)
    else:
        ny,nx=(int(cap.get(4)),int(cap.get(3)))
    x1,x2,y1,y2=0,nx,0,ny
    detected = False
    #TODO: perform detection on resized image (For speed)

    PredictedData = np.zeros((nframes, 3 * len(dlc_cfg['all_joints_names'])))
    pbar=tqdm(total=nframes)
    counter=0
    step=max(10,int(nframes/100))
    while(cap.isOpened()):
            if counter%step==0:
                pbar.update(step)

            ret, frame = cap.read()
            if ret:
                #print(counter,x1,x2,y1,y2,detected)
                originalframe=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if cfg['cropping']:
                    frame= img_as_ubyte(originalframe[cfg['y1']:cfg['y2'],cfg['x1']:cfg['x2']])[y1:y2,x1:x2]
                else:
                    frame = img_as_ubyte(originalframe[y1:y2,x1:x2])

                pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs).flatten()
                detection=np.any(pose[2::3]>detectiontreshold) #is anything detected?
                if detection:
                    pose[0::3],pose[1::3]=pose[0::3]+x1,pose[1::3]+y1 #offset according to last bounding box
                    x1,x2,y1,y2=getboundingbox(pose[0::3],pose[1::3],nx,ny,margin) #coordinates for next iteration
                    if not detected:
                        detected=True #object detected
                else:
                    if detected and (x1+y1+y2-ny+x2-nx)!=0: #was detected in last frame and dyn. cropping was performed >> but object lost in cropped variant >> re-run on full frame!
                        #print("looking again, lost!")
                        if cfg['cropping']:
                            frame= img_as_ubyte(originalframe[cfg['y1']:cfg['y2'],cfg['x1']:cfg['x2']])
                        else:
                            frame = img_as_ubyte(originalframe)
                        pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs).flatten() #no offset is necessary

                    x0, y0 = x1, y1  # remember the last crop origin (not used further in this excerpt)
                    x1,x2,y1,y2=0,nx,0,ny
                    detected=False

                PredictedData[counter, :] = pose
            else:
                nframes=counter
                break
            counter+=1

    pbar.close()
    return PredictedData,nframes
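
The getboundingbox helper called above is not part of the excerpt. A plausible reconstruction, assuming it pads the min/max of the detected coordinates by margin and clips the box to the frame bounds:

def getboundingbox(x, y, nx, ny, margin):
    # Hypothetical reconstruction of the helper referenced in GetPoseDynamic:
    # pad the extremes of the detected x/y coordinates by margin and clip to the frame.
    x1 = max(0, int(np.floor(np.nanmin(x))) - margin)
    x2 = min(nx, int(np.ceil(np.nanmax(x))) + margin)
    y1 = max(0, int(np.floor(np.nanmin(y))) - margin)
    y2 = min(ny, int(np.ceil(np.nanmax(y))) + margin)
    return x1, x2, y1, y2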
Example #2
def GetPoseS(cfg,dlc_cfg, sess, inputs, outputs,cap,nframes):
    ''' Non-batchwise pose estimation for video cap.'''
    if cfg['cropping']:
        ny,nx=checkcropping(cfg,cap)

    PredictedData = np.zeros((nframes, dlc_cfg['num_outputs'] * 3 * len(dlc_cfg['all_joints_names'])))
    pbar=tqdm(total=nframes)
    counter=0
    step=max(10,int(nframes/100))
    while(cap.isOpened()):
            if counter%step==0:
                pbar.update(step)

            ret, frame = cap.read()
            if ret:
                frame=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                if cfg['cropping']:
                    frame= img_as_ubyte(frame[cfg['y1']:cfg['y2'],cfg['x1']:cfg['x2']])
                else:
                    frame = img_as_ubyte(frame)
                pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
                PredictedData[counter, :] = pose.flatten()  # NOTE: thereby cfg['all_joints_names'] should be same order as bodyparts!
            else:
                nframes=counter
                break
            counter+=1

    pbar.close()
    return PredictedData,nframes
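
A hedged usage sketch for GetPoseS, assuming the session and graph tensors come from DeepLabCut's predict.setup_pose_prediction and the video is opened with OpenCV; the file name is illustrative:

# Illustrative call site (assumed setup, not part of the excerpt):
cap = cv2.VideoCapture('myvideo.avi')                           # hypothetical video path
nframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))                # frame count reported by OpenCV
sess, inputs, outputs = predict.setup_pose_prediction(dlc_cfg)  # assumed DeepLabCut helper
PredictedData, nframes = GetPoseS(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes)
cap.release()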
Example #3
def get_pose(image, config, session, inputs, outputs):
    """
    Gets the scoremap, local reference and pose from DeepLabCut for the given image.
    The pose contains the most probable point for each joint and is not really used later;
    the scoremap and local reference are essential for extracting skeletons.
    :param image: frame to be analyzed
    :param config, session, inputs, outputs: DeepLabCut configuration and TensorFlow variables from load_deeplabcut()

    :return: tuple of scoremap, local reference and pose
    """
    scmap, locref, pose = predict.getpose(image, config, session, inputs, outputs, True)
    return scmap, locref, pose
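
Since the docstring notes that the scoremap and local reference are what skeleton extraction consumes, here is a minimal sketch of turning them into per-joint image coordinates. The array shapes, the output stride of 8, and the dx/dy component order are assumptions, not taken from the excerpt:

def maps_to_coords(scmap, locref, stride=8.0):
    # Sketch: peak of each joint's scoremap plus the local refinement offset.
    # Assumes scmap has shape (h, w, num_joints) and locref (h, w, num_joints, 2);
    # the (dx, dy) component order of locref is an assumption here.
    num_joints = scmap.shape[2]
    coords = np.zeros((num_joints, 3))
    for j in range(num_joints):
        flat_idx = np.argmax(scmap[:, :, j])
        row, col = np.unravel_index(flat_idx, scmap[:, :, j].shape)
        offset = locref[row, col, j]
        coords[j, 0] = col * stride + 0.5 * stride + offset[0]  # x in image pixels
        coords[j, 1] = row * stride + 0.5 * stride + offset[1]  # y in image pixels
        coords[j, 2] = scmap[row, col, j]                       # peak confidence
    return coords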
def GetPoseS(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes):
    ''' Non-batchwise pose estimation for video cap.'''
    if cfg['cropping']:
        print(
            "Cropping based on the x1 = %s x2 = %s y1 = %s y2 = %s. You can adjust the cropping coordinates in the config.yaml file."
            % (cfg['x1'], cfg['x2'], cfg['y1'], cfg['y2']))
        nx = cfg['x2'] - cfg['x1']
        ny = cfg['y2'] - cfg['y1']
        if nx <= 0 or ny <= 0:
            raise Exception('Please check the order of the cropping parameters!')
        if not (cfg['x1'] >= 0 and cfg['x2'] < int(cap.get(3) + 1)
                and cfg['y1'] >= 0 and cfg['y2'] < int(cap.get(4) + 1)):
            raise Exception('Please check the boundary of the cropping box!')

    PredicteData = np.zeros(
        (nframes,
         dlc_cfg['num_outputs'] * 3 * len(dlc_cfg['all_joints_names'])))

    pbar = tqdm(total=nframes)
    counter = 0
    step = max(10, int(nframes / 100))
    while (cap.isOpened()):
        if counter % step == 0:
            pbar.update(step)

        ret, frame = cap.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if cfg['cropping']:
                frame = img_as_ubyte(frame[cfg['y1']:cfg['y2'],
                                           cfg['x1']:cfg['x2']])
            else:
                frame = img_as_ubyte(frame)
            pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
            PredicteData[counter, :] = pose.flatten()  # NOTE: dlc_cfg['all_joints_names'] must be in the same order as the bodyparts
        else:
            nframes = counter
            break
        counter += 1

    pbar.close()
    return PredicteData, nframes
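
Each row written by GetPoseS interleaves x, y and likelihood per body part. A small sketch of unpacking the result, assuming num_outputs == 1 so every joint contributes exactly one triple:

# Sketch: reshape the flat prediction array into (frame, joint, [x, y, likelihood]).
joint_names = dlc_cfg['all_joints_names']
per_joint = PredicteData[:nframes].reshape(nframes, len(joint_names), 3)
x = per_joint[:, :, 0]            # x coordinates, shape (nframes, num_joints)
y = per_joint[:, :, 1]            # y coordinates
likelihood = per_joint[:, :, 2]   # prediction confidence per joint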
Example #5
def GetPoseSfmf(cfg,dlc_cfg, sess, inputs, outputs,cap,nframes,start=0,stop=1):
    ''' Non-batchwise pose estimation for video cap.'''
    print("annotating fmf file")
    if cfg['cropping']:
        print("Cropping based on the x1 = %s x2 = %s y1 = %s y2 = %s. You can adjust the cropping coordinates in the config.yaml file." %(cfg['x1'], cfg['x2'],cfg['y1'], cfg['y2']))
        nx=cfg['x2']-cfg['x1']
        ny=cfg['y2']-cfg['y1']
        if nx <= 0 or ny <= 0:
            raise Exception('Please check the order of the cropping parameters!')
        if not (cfg['x1'] >= 0 and cfg['x2'] < int(cap.framesize[1] + 1)
                and cfg['y1'] >= 0 and cfg['y2'] < int(cap.framesize[0] + 1)):
            raise Exception('Please check the boundary of the cropping box!')
    nframes=int(stop*nframes)
    PredicteData = np.zeros((nframes, 3 * len(dlc_cfg['all_joints_names'])))
    pbar=tqdm(total=nframes)
    counter=0
    step = max(10, int(nframes / 100))
    while(counter<nframes):
            if counter%step==0:
                pbar.update(step)
            
            frame = cap.get_frame(counter)[0]
            if frame.ndim != 3:
                frame = skimage.color.gray2rgb(frame)
            frame=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if cfg['cropping']:
                frame= img_as_ubyte(frame[cfg['y1']:cfg['y2'],cfg['x1']:cfg['x2']])
            else:
                frame = img_as_ubyte(frame)
            #print(frame)
            pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
            PredicteData[counter, :] = pose.flatten()  # NOTE: thereby cfg['all_joints_names'] should be same order as bodyparts!

            counter+=1
            
    pbar.close()
    return PredicteData,nframes
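
The cap object here exposes get_frame() and framesize, which suggests a motmot FlyMovieFormat reader. A usage sketch under that assumption; the import path, accessor names and file name are assumed, not taken from the excerpt:

# Hypothetical call site, assuming cap is a motmot FlyMovieFormat movie object:
from motmot.FlyMovieFormat import FlyMovieFormat as FMF  # assumed dependency

cap = FMF.FlyMovie('session.fmf')   # illustrative .fmf path
nframes = cap.get_n_frames()        # assumed accessor for the stored frame count
PredicteData, nframes = GetPoseSfmf(cfg, dlc_cfg, sess, inputs, outputs,
                                    cap, nframes, start=0, stop=1)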
Example #6
def GetPosesofFrames(cfg,dlc_cfg, sess, inputs, outputs,directory,framelist,nframes,batchsize):
    ''' Batchwise pose prediction for the image files listed in framelist within directory.'''
    from skimage import io
    print("Starting to extract posture")
    im=io.imread(os.path.join(directory,framelist[0]),mode='RGB')
    ny,nx,nc=np.shape(im)
    print("Overall # of frames: ", nframes," found with (before cropping) frame dimensions: ", nx,ny)

    PredicteData = np.zeros((nframes, 3 * len(dlc_cfg['all_joints_names'])))
    batch_ind = 0 # keeps track of which image within a batch should be written to
    batch_num = 0 # keeps track of which batch you are at

    if cfg['cropping']:
        print("Cropping based on the x1 = %s x2 = %s y1 = %s y2 = %s. You can adjust the cropping coordinates in the config.yaml file." %(cfg['x1'], cfg['x2'],cfg['y1'], cfg['y2']))
        nx,ny=cfg['x2']-cfg['x1'],cfg['y2']-cfg['y1']
        if nx <= 0 or ny <= 0:
            raise Exception('Please check the order of the cropping parameters!')
        if not (cfg['x1'] >= 0 and cfg['x2'] < int(np.shape(im)[1])
                and cfg['y1'] >= 0 and cfg['y2'] < int(np.shape(im)[0])):
            raise Exception('Please check the boundary of the cropping box!')
    
    pbar=tqdm(total=nframes)
    counter=0
    step=max(10,int(nframes/100))
    
    if batchsize==1:
        for counter,framename in enumerate(framelist):
                frame=io.imread(os.path.join(directory,framename),mode='RGB')
                if counter%step==0:
                    pbar.update(step)

                if cfg['cropping']:
                    frame= img_as_ubyte(frame[cfg['y1']:cfg['y2'],cfg['x1']:cfg['x2'],:])
                else:
                    frame = img_as_ubyte(frame)
                    
                pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
                PredicteData[counter, :] = pose.flatten()
    else:
        frames = np.empty((batchsize, ny, nx, 3), dtype='ubyte') # this keeps all the frames of a batch
        for counter,framename in enumerate(framelist):
                frame=io.imread(os.path.join(directory,framename),mode='RGB')
                if counter%step==0:
                    pbar.update(step)

                if cfg['cropping']:
                    frames[batch_ind] = img_as_ubyte(frame[cfg['y1']:cfg['y2'],cfg['x1']:cfg['x2'],:])
                else:
                    frames[batch_ind] = img_as_ubyte(frame)
                    
                if batch_ind==batchsize-1:
                    pose = predict.getposeNP(frames,dlc_cfg, sess, inputs, outputs)
                    PredicteData[batch_num*batchsize:(batch_num+1)*batchsize, :] = pose
                    batch_ind = 0
                    batch_num += 1
                else:
                    batch_ind += 1
            
        if batch_ind > 0:  # handle the final, partially filled batch
            pose = predict.getposeNP(frames, dlc_cfg, sess, inputs, outputs)  # run the whole buffer (it may still hold frames from the previous batch)
            PredicteData[batch_num * batchsize:batch_num * batchsize + batch_ind, :] = pose[:batch_ind, :]

    pbar.close()
    return PredicteData,nframes,nx,ny
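
An illustrative call for GetPosesofFrames, assuming the frames are image files in a single directory; the extension filter and batch size are arbitrary choices here:

# Illustrative call site (assumed, not part of the excerpt):
framelist = sorted(fn for fn in os.listdir(directory) if fn.endswith('.png'))
PredicteData, nframes, nx, ny = GetPosesofFrames(cfg, dlc_cfg, sess, inputs, outputs,
                                                 directory, framelist, len(framelist),
                                                 batchsize=8)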
def frame_process(frame_arr, dlc_cfg, sess, inputs, outputs, counter,
                  save_frames, destfolder, LED, x_arr, y_arr, frame_time,
                  start, baseline):
    """Estimates pose in each frame, and optionally plots the pose on each frame."""
    # Set up parameters for refractory period (to check whether there was movement on the previous frame).
    global thisFrameWasMoved
    global lastFrameWasMoved
    global threshold_time
    global threshold_count
    x_avg_left = []
    y_avg_left = []
    x_avg_right = []
    y_avg_right = []
    # Run DeepLabCut pose prediction on each batch of two frames
    for n, frame in enumerate(frame_arr):
        pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
        if time.time() - start >= 10:
            PredicteData[counter + n, :] = pose.flatten()  # NOTE: dlc_cfg['all_joints_names'] must be in the same order as the bodyparts
            # Create lists to store average x and y paw positions
            for x_val, y_val in zip(x_range, y_range):
                x_arr.append(PredicteData[counter + n, :][x_val])
                y_arr.append(PredicteData[counter + n, :][y_val])
            if n == 0:
                add = 0
            elif n == 1:
                add = 8
            x_avg_right.append(np.mean(x_arr[0 + add:3 + add]))
            x_avg_left.append(np.mean(x_arr[4 + add:7 + add]))
            y_avg_right.append(np.mean(y_arr[0 + add:3 + add]))
            y_avg_left.append(np.mean(y_arr[4 + add:7 + add]))
            # Save the frames in a separate thread (if we wish to do so)
            if save_frames:
                _thread.start_new_thread(
                    frame_save_func,
                    (frame, x_range, y_range, x_avg_left, y_avg_left,
                     x_avg_right, y_avg_right, destfolder, n, counter))
    # THRESHOLDS FOR Y MOVEMENT
    # min y movement on left paw to be counted as a trigger = 5 px
    # max y movement on right paw to be counted as a trigger = 10 px
    # Max y movement overall: 100 px
    y_left = 5
    y_right = 10
    y_upper_lim = 100

    # Red LED flash timing (s)
    gpio_light_time = 0.2

    # Length of trial (for data recording purposes)
    trial_length = 130

    # Water reward release time (s)
    gpio_water_time = 0.15

    # Refractory time (s): time after last trigger during which no LED flash can be triggered/reward given
    refractory_time = 0.3

    thisFrameWasMoved = False

    # We wait for 10.1 s after the start of the trial to start giving behavioural feedback. This is because the latency
    # for pose estimation is longer, and the framerate is less stable, in the first 10 s of the trial
    # (likely because of Python library setup).
    if counter >= 1 and time.time() - start > 10.1:
        trial_start = True
        if not baseline:
            _thread.start_new_thread(led_task,
                                     (LED, trial_start, trial_length - 10))
        led_arr[counter, 1] = abs(y_avg_left[1] - y_avg_left[0])
        led_arr[counter, 2] = abs(y_avg_right[1] - y_avg_right[0])
        if (y_left <= abs(y_avg_left[1] - y_avg_left[0]) <= y_upper_lim
                and y_right >= abs(y_avg_right[1] - y_avg_right[0])):
            # and PredicteData[counter, acc_range].mean() >= 0.20
            thisFrameWasMoved = True
            if threshold_count == 0:
                threshold_time = time.time() - 0.5
        if thisFrameWasMoved and not lastFrameWasMoved and abs(
                time.time() - threshold_time) >= refractory_time:
            threshold_time = time.time()
            threshold_count += 1
            delay = threshold_time - frame_time
            led_arr[counter + 1, 3] = threshold_time
            led_arr[counter + 1, 4] = delay
            ttt = True
            _thread.start_new_thread(
                thresholdFunc, (LED, baseline, ttt, gpio_light_time,
                                gpio_water_time, counter, threshold_time))
        lastFrameWasMoved = thisFrameWasMoved

    # Blank out per-trigger lists and variables (these only rebind local names; the function returns right after)
    x_arr = []
    y_arr = []
    x_avg_left = []
    y_avg_left = []
    x_avg_right = []
    y_avg_right = []
    x_first = 0
    y_first = 0
    threshold = 0
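
The movement-trigger decision above is spread across several globals. A condensed sketch of the same refractory-gated check as a pure function, reusing the threshold values defined in frame_process; everything else is assumed:

def should_trigger(dy_left, dy_right, last_frame_moved, last_trigger_time, now,
                   y_left=5, y_right=10, y_upper_lim=100, refractory_time=0.3):
    # dy_left / dy_right are the absolute frame-to-frame y displacements of the left/right paws.
    moved = (y_left <= dy_left <= y_upper_lim) and (dy_right <= y_right)
    # Fire only on a fresh movement (previous frame still) outside the refractory window.
    trigger = moved and not last_frame_moved and (now - last_trigger_time) >= refractory_time
    return moved, trigger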