Example 1
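These examples assume the following module-level imports. The DeepLabCut module paths are assumptions based on DeepLabCut 2.x; `checkcropping` is a helper defined alongside these functions in the same source module and therefore needs no import here.

import os
from pathlib import Path

import cv2
import numpy as np
import tensorflow as tf
from skimage.util import img_as_ubyte
from tqdm import tqdm

# DeepLabCut 2.x module paths, assumed from the calls used below:
from deeplabcut.utils import auxiliaryfunctions
from deeplabcut.pose_estimation_tensorflow.config import load_config
from deeplabcut.pose_estimation_tensorflow.nnet import predict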
def GetPoseS_GTF(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes):
    '''Non-batchwise pose estimation for video cap.'''
    if cfg['cropping']:
        ny, nx = checkcropping(cfg, cap)

    pose_tensor = predict.extract_GPUprediction(outputs, dlc_cfg)  # extract_output_tensor(outputs, dlc_cfg)
    PredictedData = np.zeros((nframes, 3 * len(dlc_cfg['all_joints_names'])))
    pbar = tqdm(total=nframes)
    counter = 0
    step = max(10, int(nframes / 100))
    while cap.isOpened():
        if counter % step == 0:
            pbar.update(step)

        ret, frame = cap.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if cfg['cropping']:
                frame = img_as_ubyte(frame[cfg['y1']:cfg['y2'], cfg['x1']:cfg['x2']])
            else:
                frame = img_as_ubyte(frame)

            pose = sess.run(pose_tensor,
                            feed_dict={inputs: np.expand_dims(frame, axis=0).astype(float)})
            pose[:, [0, 1, 2]] = pose[:, [1, 0, 2]]  # change order to x, y, confidence
            # pose = predict.getpose(frame, dlc_cfg, sess, inputs, outputs)
            PredictedData[counter, :] = pose.flatten()  # NOTE: dlc_cfg['all_joints_names'] must be in the same order as the bodyparts!
        else:
            nframes = counter
            break
        counter += 1

    pbar.close()
    return PredictedData, nframes
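For context, a minimal sketch of how this function might be driven. The `setup_GPUpose_prediction` call mirrors Example 3 below; the video path is purely illustrative, and `cfg`/`dlc_cfg` are assumed to be already loaded.

# Build the TF session and I/O tensors once, then reuse them per video.
sess, inputs, outputs = predict.setup_GPUpose_prediction(dlc_cfg)

cap = cv2.VideoCapture('videos/session1.avi')  # hypothetical video path
nframes = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

# Frame-by-frame inference; returns an (nframes, 3 * #joints) array of
# x, y, confidence triplets plus the number of frames actually read.
PredictedData, nframes = GetPoseS_GTF(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes)
cap.release()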
Example 2
def GetPoseF_GTF(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes, batchsize):
    '''Batchwise prediction of pose.'''
    PredictedData = np.zeros((nframes, 3 * len(dlc_cfg['all_joints_names'])))
    batch_ind = 0  # keeps track of which image within a batch should be written to
    batch_num = 0  # keeps track of which batch you are at
    ny, nx = int(cap.get(4)), int(cap.get(3))  # frame height and width
    if cfg['cropping']:
        ny, nx = checkcropping(cfg, cap)

    pose_tensor = predict.extract_GPUprediction(outputs, dlc_cfg)  # extract_output_tensor(outputs, dlc_cfg)
    frames = np.empty((batchsize, ny, nx, 3), dtype='ubyte')  # this keeps all frames in a batch
    pbar = tqdm(total=nframes)
    counter = 0
    step = max(10, int(nframes / 100))
    while cap.isOpened():
        if counter % step == 0:
            pbar.update(step)
        ret, frame = cap.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            if cfg['cropping']:
                frames[batch_ind] = img_as_ubyte(frame[cfg['y1']:cfg['y2'], cfg['x1']:cfg['x2']])
            else:
                frames[batch_ind] = img_as_ubyte(frame)

            if batch_ind == batchsize - 1:
                # pose = predict.getposeNP(frames, dlc_cfg, sess, inputs, outputs)
                pose = sess.run(pose_tensor, feed_dict={inputs: frames})
                pose[:, [0, 1, 2]] = pose[:, [1, 0, 2]]  # change order to x, y, confidence
                pose = np.reshape(pose, (batchsize, -1))  # bring into batchsize rows of x, y, conf, etc.
                PredictedData[batch_num * batchsize:(batch_num + 1) * batchsize, :] = pose

                batch_ind = 0
                batch_num += 1
            else:
                batch_ind += 1
        else:
            nframes = counter
            print("Detected frames: ", nframes)
            if batch_ind > 0:
                # pose = predict.getposeNP(frames, dlc_cfg, sess, inputs, outputs)
                # Process the whole batch (some frames might be from the previous batch!)
                pose = sess.run(pose_tensor, feed_dict={inputs: frames})
                pose[:, [0, 1, 2]] = pose[:, [1, 0, 2]]
                pose = np.reshape(pose, (batchsize, -1))
                PredictedData[batch_num * batchsize:batch_num * batchsize + batch_ind, :] = pose[:batch_ind, :]

            break
        counter += 1

    pbar.close()
    return PredictedData, nframes
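Each flattened row of `PredictedData` holds one (x, y, confidence) triplet per joint, in the order of `dlc_cfg['all_joints_names']` (see the NOTE in Example 1). A sketch of unpacking that layout into a labeled pandas DataFrame; the flat column naming here is illustrative, not DeepLabCut's native MultiIndex format.

import pandas as pd

def to_dataframe(PredictedData, joint_names):
    # One column per joint/coordinate pair: x, y, and confidence.
    columns = [f"{j}_{c}" for j in joint_names for c in ("x", "y", "confidence")]
    return pd.DataFrame(PredictedData, columns=columns)

df = to_dataframe(PredictedData, dlc_cfg['all_joints_names'])
print(df.filter(like='_confidence').mean())  # mean detection confidence per joint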
Example 3
Snapshots = Snapshots[increasing_indices]
print("Using %s" % Snapshots[snapshotindex], "for model", modelfolder)
nframes = 300
tf.reset_default_graph()
# Check if data already was generated:
dlc_config['init_weights'] = os.path.join(modelfolder, 'train', Snapshots[snapshotindex])
trainingsiterations = (dlc_config['init_weights'].split(os.sep)[-1]).split('-')[-1]
# Update number of outputs and batch size
dlc_config['num_outputs'] = cfg.get('num_outputs', dlc_config.get('num_outputs', 1))
batchsize = 1
dlc_config['batch_size'] = cfg['batch_size']
sess, inputs, outputs = predict.setup_GPUpose_prediction(dlc_config)
pose_tensor = predict.extract_GPUprediction(outputs, dlc_config)  # extract_output_tensor(outputs, dlc_config)
PredictedData = np.zeros((nframes, dlc_config['num_outputs'] * 3 * len(dlc_config['all_joints_names'])))
counter = 0
step = max(10, int(nframes / 100))
x_range = list(range(0, 3 * len(dlc_config['all_joints_names']), 3))  # column indices of x coordinates
y_range = list(range(1, 3 * len(dlc_config['all_joints_names']), 3))  # column indices of y coordinates
batch_ind = 0
batch_num = 0
ny, nx = int(cap_C.get(4)), int(cap_C.get(3))  # frame height and width of the calibration capture
frames = np.empty((batchsize, ny, nx, 3), dtype='ubyte')  # this keeps all frames in a batch

input("Press Enter to CALIBRATE :")
pbar = tqdm(total=nframes)
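The capture loop itself is not part of this excerpt. A sketch of what typically follows, mirroring the frame-by-frame loop of Example 1; the final averaging step, using the `x_range`/`y_range` column indices prepared above, is an assumption about how the calibration readout could work, not something shown in the source.

while cap_C.isOpened() and counter < nframes:
    ret, frame = cap_C.read()
    if not ret:
        nframes = counter
        break
    frame = img_as_ubyte(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    pose = sess.run(pose_tensor,
                    feed_dict={inputs: np.expand_dims(frame, axis=0).astype(float)})
    pose[:, [0, 1, 2]] = pose[:, [1, 0, 2]]  # reorder to x, y, confidence
    PredictedData[counter, :] = pose.flatten()
    if counter % step == 0:
        pbar.update(step)
    counter += 1
pbar.close()

# Hypothetical calibration readout: average x/y position per joint.
mean_x = PredictedData[:counter, x_range].mean(axis=0)
mean_y = PredictedData[:counter, y_range].mean(axis=0)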
  def __init__(self, config, shuffle=1, trainingsetindex=0,
               gputouse=None, rgb=True):
    '''
    The constructor loads the deep learning model from the specified path.

    config - path where the model's config.yaml file is stored
    shuffle - not sure how it affects the algorithm; it was set to 1
              inside the core library, so stick with 1 unless you
              have a strong need to change it
    trainingsetindex - always set to 0; not sure of its effect, used deep
                       inside the core library
    gputouse - GPU card number to use. The algorithm uses the GPU by default
               when the TensorFlow GPU version is installed. If the GPU is
               not getting utilised, this is a good parameter to check; but
               setting this to None on our present system still utilises
               the GPU.
    rgb - set to True if we use RGB images. For grayscale images, which is
          our case, this is always False.
    '''

    if 'TF_CUDNN_USE_AUTOTUNE' in os.environ:
        del os.environ['TF_CUDNN_USE_AUTOTUNE']  # was potentially set during training

    if gputouse is not None:  # gpu selection
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)

    vers = (tf.__version__).split('.')

    if int(vers[0]) == 1 and int(vers[1]) > 12:
        TF = tf.compat.v1
    else:
        TF = tf

    TF.reset_default_graph()

    cfg = auxiliaryfunctions.read_config(config)
    cfg['batch_size'] = 1
    trainFraction = cfg['TrainingFraction'][trainingsetindex]
    modelfolder = os.path.join(cfg["project_path"],
                               str(auxiliaryfunctions.GetModelFolder(trainFraction, shuffle, cfg)))
    path_test_config = Path(modelfolder) / 'test' / 'pose_cfg.yaml'
    try:
        dlc_cfg = load_config(str(path_test_config))
    except FileNotFoundError:
        raise FileNotFoundError("It seems the model for shuffle %s and "
                                "trainFraction %s does not exist." % (shuffle, trainFraction))
    # Check which snapshots are available and sort them by number of iterations
    try:
        Snapshots = np.array([fn.split('.')[0] for fn in
                              os.listdir(os.path.join(modelfolder, 'train'))
                              if "index" in fn])
    except FileNotFoundError:
        raise FileNotFoundError("Snapshots not found! It seems the dataset "
                                "for shuffle %s has not been trained/does not "
                                "exist.\n Please train it before using it to "
                                "analyze videos.\n Use the function "
                                "'train_network' to train the network for "
                                "shuffle %s." % (shuffle, shuffle))

    if cfg['snapshotindex'] == 'all':
        print("Snapshotindex is set to 'all' in the config.yaml file. "
              "Running video analysis with all snapshots is very costly! "
              "Use the function 'evaluate_network' to choose the best "
              "snapshot. For now, changing snapshot index to -1!")
        snapshotindex = -1
    else:
        snapshotindex = cfg['snapshotindex']

    increasing_indices = np.argsort([int(m.split('-')[1]) for m in Snapshots])
    Snapshots = Snapshots[increasing_indices]

    print("Using %s" % Snapshots[snapshotindex], "for model", modelfolder)

    ##################################################
    # Load and setup CNN part detector
    ##################################################

    # Check if data already was generated:
    dlc_cfg['init_weights'] = os.path.join(modelfolder, 'train', Snapshots[snapshotindex])
    trainingsiterations = (dlc_cfg['init_weights'].split(os.sep)[-1]).split('-')[-1]

    # Update batch size (based on parameters in config.yaml)
    dlc_cfg['batch_size'] = cfg['batch_size']

    # Update number of outputs and adjust pandas indices
    dlc_cfg['num_outputs'] = cfg.get('num_outputs', 1)

    # Set up the TensorFlow session and the input/output tensors
    self.sess, self.inputs, self.outputs = predict.setup_GPUpose_prediction(dlc_cfg)

    if gputouse is not None:  # gpu selection
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gputouse)
    self.rgb = rgb
    self.cfg = cfg
    self.dlc_cfg = dlc_cfg
    self.pose_tensor = predict.extract_GPUprediction(self.outputs, self.dlc_cfg)

    if self.cfg['cropping']:
        # NOTE: 'cap' is not defined in this excerpt; a video capture handle
        # must be in scope here for the cropping check to run.
        self.ny, self.nx = checkcropping(self.cfg, cap)
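A sketch of how this constructor might be used. The class statement is not part of this excerpt, so the name `PoseEstimator` is hypothetical.

# Hypothetical wrapper class name; only __init__ is shown in the excerpt.
estimator = PoseEstimator('path/to/project/config.yaml',
                          shuffle=1,
                          trainingsetindex=0,
                          gputouse=0,
                          rgb=False)  # grayscale camera, per the docstring

# The constructor leaves a ready-to-use session and tensors on the instance:
# estimator.sess, estimator.inputs, estimator.outputs, estimator.pose_tensor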