Code example #1
def GetPoseFfmf(cfg,dlc_cfg, sess, inputs, outputs,cap,nframes,batchsize,start=0,stop=1):
    ''' Batchwise prediction of pose '''

    ny,nx=int(cap.framesize[0]),int(cap.framesize[1])
    print("annotating fmf file")
    if cfg['cropping']:
        print("Cropping based on the x1 = %s x2 = %s y1 = %s y2 = %s. You can adjust the cropping coordinates in the config.yaml file." %(cfg['x1'], cfg['x2'],cfg['y1'], cfg['y2']))
        nx=cfg['x2']-cfg['x1']
        ny=cfg['y2']-cfg['y1']
        if nx>0 and ny>0:
            pass
        else:
            raise Exception('Please check the order of cropping parameter!')
        if cfg['x1']>=0 and cfg['x2']<int(cap.framesize[1]+1) and cfg['y1']>=0 and cfg['y2']<int(cap.framesize[0]+1):
            pass #good cropping box
        else:
            raise Exception('Please check the boundary of cropping!')
    nframes=int(stop*nframes)
    
    PredicteData = np.zeros((nframes, 3 * len(dlc_cfg['all_joints_names'])))
    batch_ind = 0 # keeps track of which image within a batch should be written to
    batch_num = 0 # keeps track of which batch you are at
    
    frames = np.empty((batchsize, ny, nx, 3), dtype='ubyte') # this keeps all frames in a batch
    pbar=tqdm(total=nframes)
    counter=0
    step=max(10,int(nframes/100))
    while(counter<nframes):
        if counter%step==0:
            pbar.update(step)
        frame = cap.get_frame(counter)
        if frame.ndim != 3:
            frame = skimage.color.gray2rgb(frame)
        frame=cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if cfg['cropping']:
            frames[batch_ind] = img_as_ubyte(frame[cfg['y1']:cfg['y2'],cfg['x1']:cfg['x2']])
        else:
            frames[batch_ind] = img_as_ubyte(frame)
            
        if batch_ind==batchsize-1:
            pose = predict.getposeNP(frames,dlc_cfg, sess, inputs, outputs)
            PredicteData[batch_num*batchsize:(batch_num+1)*batchsize, :] = pose
            batch_ind = 0
            batch_num += 1
        else:
            batch_ind+=1
            
        counter+=1

    nframes = counter
    print("Detected frames: ", nframes)
    if batch_ind>0:
        pose = predict.getposeNP(frames, dlc_cfg, sess, inputs, outputs) #process the whole batch (some frames might be from previous batch!)
        PredicteData[batch_num*batchsize:batch_num*batchsize+batch_ind, :] = pose[:batch_ind,:]

    pbar.close()
    return PredicteData,nframes
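A minimal usage sketch for this snippet. The .fmf reader cap, the config dictionaries cfg/dlc_cfg, and the TensorFlow objects sess, inputs and outputs are all assumed to have been created elsewhere (e.g. by the surrounding DeepLabCut analysis code); only the call pattern is shown.

# Hypothetical usage sketch -- cfg, dlc_cfg, sess, inputs, outputs and cap are assumptions.
nframes = 1000  # total number of frames in the .fmf file, known from the reader
PredicteData, nframes = GetPoseFfmf(cfg, dlc_cfg, sess, inputs, outputs,
                                    cap, nframes, batchsize=4,
                                    start=0, stop=1)
# Each row of PredicteData holds x, y and likelihood for every bodypart of one frame.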
Code example #2
def GetPoseF(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes, batchsize):
    ''' Batchwise prediction of pose '''
    PredictedData = np.zeros(
        (nframes,
         dlc_cfg['num_outputs'] * 3 * len(dlc_cfg['all_joints_names'])))
    batch_ind = 0  # keeps track of which image within a batch should be written to
    batch_num = 0  # keeps track of which batch you are at
    ny, nx = int(cap.get(4)), int(cap.get(3))
    if cfg['cropping']:
        ny, nx = checkcropping(cfg, cap)

    frames = np.empty((batchsize, ny, nx, 3),
                      dtype='ubyte')  # this keeps all frames in a batch
    pbar = tqdm(total=nframes)
    counter = 0
    step = max(10, int(nframes / 100))
    while (cap.isOpened()):
        if counter % step == 0:
            pbar.update(step)
        ret, frame = cap.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if cfg['cropping']:
                frames[batch_ind] = img_as_ubyte(frame[cfg['y1']:cfg['y2'],
                                                       cfg['x1']:cfg['x2']])
            else:
                frames[batch_ind] = img_as_ubyte(frame)

            if batch_ind == batchsize - 1:
                pose = predict.getposeNP(frames, dlc_cfg, sess, inputs,
                                         outputs)
                PredictedData[batch_num * batchsize:(batch_num + 1) *
                              batchsize, :] = pose
                batch_ind = 0
                batch_num += 1
            else:
                batch_ind += 1
        else:
            nframes = counter
            print("Detected frames: ", nframes)
            if batch_ind > 0:
                pose = predict.getposeNP(
                    frames, dlc_cfg, sess, inputs, outputs
                )  #process the whole batch (some frames might be from previous batch!)
                PredictedData[batch_num * batchsize:batch_num * batchsize +
                              batch_ind, :] = pose[:batch_ind, :]
            break
        counter += 1

    pbar.close()
    return PredictedData, nframes
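A hedged usage sketch for this variant, assuming the video is opened with OpenCV and that cfg, dlc_cfg, sess, inputs and outputs were prepared by the surrounding DeepLabCut code; the video path and batch size below are placeholders.

import cv2

cap = cv2.VideoCapture("video.avi")               # placeholder path
nframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  # upper bound on the frame count
PredictedData, nframes = GetPoseF(cfg, dlc_cfg, sess, inputs, outputs,
                                  cap, nframes, batchsize=8)
cap.release()
# PredictedData has num_outputs * 3 columns per bodypart (x, y, likelihood).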
Code example #3
def predict_im(self, im):
    # Method of a pose-estimation wrapper class: replicate the grayscale
    # image across 3 channels, predict a batch of one, and return one
    # (x, y, likelihood) row per bodypart.
    imfull = np.tile(im[:, :, None], (1, 1, 3))
    pose = getposeNP(imfull[None, :, :, :],
                     self.dlc_cfg,
                     self.sess,
                     self.inputs,
                     self.outputs,
                     outall=False)
    return pose.reshape(-1, 3)
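The only preprocessing in this method is replicating the single-channel image across three channels and adding a batch axis. A NumPy-only sketch of that step (the image size is an illustrative assumption):

import numpy as np

im = np.zeros((480, 640), dtype=np.uint8)    # e.g. one grayscale frame
imfull = np.tile(im[:, :, None], (1, 1, 3))  # (480, 640) -> (480, 640, 3)
batch = imfull[None, :, :, :]                # -> (1, 480, 640, 3), a batch of one
print(batch.shape)

The final pose.reshape(-1, 3) then turns the flat prediction into one (x, y, likelihood) row per bodypart.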
Code example #4
def GetPosesofFrames(cfg,dlc_cfg, sess, inputs, outputs,directory,framelist,nframes,batchsize):
    ''' Batchwise prediction of pose  for framelist in directory'''
    from skimage import io
    print("Starting to extract posture")
    im=io.imread(os.path.join(directory,framelist[0]),mode='RGB')
    ny,nx,nc=np.shape(im)
    print("Overall # of frames: ", nframes," found with (before cropping) frame dimensions: ", nx,ny)

    PredicteData = np.zeros((nframes, 3 * len(dlc_cfg['all_joints_names'])))
    batch_ind = 0 # keeps track of which image within a batch should be written to
    batch_num = 0 # keeps track of which batch you are at

    if cfg['cropping']:
        print("Cropping based on the x1 = %s x2 = %s y1 = %s y2 = %s. You can adjust the cropping coordinates in the config.yaml file." %(cfg['x1'], cfg['x2'],cfg['y1'], cfg['y2']))
        nx,ny=cfg['x2']-cfg['x1'],cfg['y2']-cfg['y1']
        if nx>0 and ny>0:
            pass
        else:
            raise Exception('Please check the order of cropping parameter!')
        if cfg['x1']>=0 and cfg['x2']<int(np.shape(im)[1]) and cfg['y1']>=0 and cfg['y2']<int(np.shape(im)[0]):
            pass #good cropping box
        else:
            raise Exception('Please check the boundary of cropping!')
    
    pbar=tqdm(total=nframes)
    counter=0
    step=max(10,int(nframes/100))
    
    if batchsize==1:
        for counter,framename in enumerate(framelist):
            frame=io.imread(os.path.join(directory,framename),mode='RGB')
            if counter%step==0:
                pbar.update(step)

            if cfg['cropping']:
                frame = img_as_ubyte(frame[cfg['y1']:cfg['y2'],cfg['x1']:cfg['x2'],:])
            else:
                frame = img_as_ubyte(frame)

            pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
            PredicteData[counter, :] = pose.flatten()
    else:
        frames = np.empty((batchsize, ny, nx, 3), dtype='ubyte') # this keeps all the frames of a batch
        for counter,framename in enumerate(framelist):
            frame=io.imread(os.path.join(directory,framename),mode='RGB')
            if counter%step==0:
                pbar.update(step)

            if cfg['cropping']:
                frames[batch_ind] = img_as_ubyte(frame[cfg['y1']:cfg['y2'],cfg['x1']:cfg['x2'],:])
            else:
                frames[batch_ind] = img_as_ubyte(frame)

            if batch_ind==batchsize-1:
                pose = predict.getposeNP(frames,dlc_cfg, sess, inputs, outputs)
                PredicteData[batch_num*batchsize:(batch_num+1)*batchsize, :] = pose
                batch_ind = 0
                batch_num += 1
            else:
                batch_ind+=1

        if batch_ind>0: #take care of the remaining frames (the last, partially filled batch)
            pose = predict.getposeNP(frames, dlc_cfg, sess, inputs, outputs) #process the whole batch (some frames might be from previous batch!)
            PredicteData[batch_num*batchsize:batch_num*batchsize+batch_ind, :] = pose[:batch_ind,:]

    pbar.close()
    return PredicteData,nframes,nx,ny
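A sketch of how this function might be driven from a directory of extracted frames. The directory name and file extension are assumptions, and cfg, dlc_cfg, sess, inputs and outputs are again taken as given.

import os

directory = "/path/to/extracted_frames"  # placeholder
framelist = sorted(fn for fn in os.listdir(directory) if fn.endswith(".png"))
nframes = len(framelist)
PredicteData, nframes, nx, ny = GetPosesofFrames(cfg, dlc_cfg, sess, inputs,
                                                 outputs, directory, framelist,
                                                 nframes, batchsize=16)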
Code example #5
def GetPoseF(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes, batchsize):
    ''' Batchwise prediction of pose '''

    scmap_total = []  # collect the score maps returned for each processed batch
    PredicteData = np.zeros((nframes, 3 * len(dlc_cfg['all_joints_names'])))
    batch_ind = 0  # keeps track of which image within a batch should be written to
    batch_num = 0  # keeps track of which batch you are at
    ny, nx = int(cap.get(4)), int(cap.get(3))
    if cfg['cropping']:
        print(
            "Cropping based on the x1 = %s x2 = %s y1 = %s y2 = %s. You can adjust the cropping coordinates in the config.yaml file."
            % (cfg['x1'], cfg['x2'], cfg['y1'], cfg['y2']))
        nx = cfg['x2'] - cfg['x1']
        ny = cfg['y2'] - cfg['y1']
        if nx > 0 and ny > 0:
            pass
        else:
            raise Exception('Please check the order of cropping parameter!')
        if cfg['x1'] >= 0 and cfg['x2'] < int(cap.get(
                3) + 1) and cfg['y1'] >= 0 and cfg['y2'] < int(cap.get(4) + 1):
            pass  #good cropping box
        else:
            raise Exception('Please check the boundary of cropping!')

    frames = np.empty((batchsize, ny, nx, 3),
                      dtype='ubyte')  # this keeps all frames in a batch
    pbar = tqdm(total=nframes)
    counter = 0
    step = max(10, int(nframes / 100))
    while (cap.isOpened()):
        if counter % step == 0:
            pbar.update(step)
        ret, frame = cap.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if cfg['cropping']:
                frames[batch_ind] = img_as_ubyte(frame[cfg['y1']:cfg['y2'],
                                                       cfg['x1']:cfg['x2']])
            else:
                frames[batch_ind] = img_as_ubyte(frame)

            if batch_ind == batchsize - 1:
                pose, scmap = predict.getposeNP(frames, dlc_cfg, sess, inputs,
                                                outputs)
                PredicteData[batch_num * batchsize:(batch_num + 1) *
                             batchsize, :] = pose
                scmap_total.append(scmap)
                batch_ind = 0
                batch_num += 1
            else:
                batch_ind += 1
        else:
            nframes = counter
            print("Detected frames: ", nframes)
            if batch_ind > 0:
                pose, scmap = predict.getposeNP(
                    frames, dlc_cfg, sess, inputs, outputs
                )  #process the whole batch (some frames might be from previous batch!)
                PredicteData[batch_num * batchsize:batch_num * batchsize +
                             batch_ind, :] = pose[:batch_ind, :]
                scmap_total.append(scmap)
            break
        counter += 1

    pbar.close()
    return PredicteData, nframes, scmap_total
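This variant also collects the score maps of every processed batch in scmap_total (initialized to an empty list above). A hedged sketch of combining the returned list, assuming predict.getposeNP here returns score maps whose first axis is the batch dimension:

import numpy as np

PredicteData, nframes, scmap_total = GetPoseF(cfg, dlc_cfg, sess, inputs,
                                              outputs, cap, nframes,
                                              batchsize=8)
# Each list entry holds one batch of score maps; the last batch may contain
# stale frames, so keep only the first nframes after concatenation.
scmaps = np.concatenate(scmap_total, axis=0)[:nframes]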