コード例 #1
0
ファイル: callbacks.py プロジェクト: lcalem/main-pe
    def on_epoch_end(self, epoch, logs=None):
        '''
        Keras callback hook: evaluate the model on Human3.6M at the end of
        each epoch, persist the per-block scores, and track the best epoch.

        epoch: 0-based epoch index supplied by Keras (stored 1-based below)
        logs: Keras metrics dict, unused here (kept for the callback API;
              the original `logs={}` mutable default is avoided)
        '''
        model = self.model

        # One error score (mm) per pose-prediction block of the model
        scores = measures.eval_human36m_sc_error(model,
                                                 self.num_blocks,
                                                 self.x,
                                                 self.pw,
                                                 self.afmat,
                                                 self.rootz,
                                                 self.scam,
                                                 pose_only=self.pose_only,
                                                 batch_size=self.batch_size)

        epoch += 1  # log epochs 1-based
        if self.logdir is not None:
            if not hasattr(self, 'logarray'):
                self.logarray = {}
            self.logarray[epoch] = scores
            with open(os.path.join(self.logdir, 'h36m_val.json'), 'w') as f:
                f.write(json.dumps(self.logarray))

        cur_best = min(scores)
        self.scores[epoch] = cur_best

        # Update the running best before printing it; the original computed
        # cur_best but never refreshed best_score / best_epoch, so the line
        # below could print stale (or unset) values.  getattr keeps this
        # backward-compatible when no previous best exists yet.
        if cur_best < getattr(self, 'best_score', float('inf')):
            self.best_score = cur_best
            self.best_epoch = epoch

        log.printcn(
            log.OKBLUE, 'Best score is %.1f at epoch %d' %
            (self.best_score, self.best_epoch))
コード例 #2
0
def compute_mpjpe(nb_blocks,
                  pred,
                  pose_w,
                  afmat,
                  rootz,
                  scam,
                  resol_z=2000.,
                  verbose=True):
    '''
    Smaller version of eval_human36m_sc_error that takes the prediction
    directly instead of running model.predict.

    nb_blocks: nb pose prediction blocks of the model (there is one y_pred per block)
    pred: the prediction tensor (pose part only)
          shape : (nb_blocks, batch_size, nb_joints, dim + 1)  i.e (2, 16, 17, 4)
    pose_w: the ground truth pose in the world in millimeters
            shape : (batch_size, nb_joints, dim) i.e (16, 17, 3)
    afmat: transformation matrix used to convert uvd coordinates of the crop to the uvd coordinates of the original image
    rootz: origin of the z axis
    scam: camera used to convert uvd coordinates (uv pixels and depth in mm) to world (millimeters) coordinates
    resol_z: unused in this function; kept for signature compatibility with
             eval_human36m_sc_error
    verbose: print each block error as it is computed

    Returns a list with one mean distance error (mm) per block.
    '''
    print("MPJPE: pred shape %s" % pred.shape)
    print("MPJPE: pose_w shape %s" % pose_w.shape)
    print("MPJPE: afmat shape %s" % afmat.shape)
    print("MPJPE: scam shape %s" % scam.shape)
    # assert len(pred) == len(pose_w) == len(afmat) == len(scam)

    if rootz.get_shape().ndims == 1:
        rootz = tf.expand_dims(rootz, axis=-1)

    # Root-center the GT poses out-of-place: the original aliased pose_w and
    # used `y_true_w -= ...`, which silently mutates the caller's data when
    # pose_w is a numpy array (the sibling eval_human36m_sc_error copies
    # before subtracting for exactly this reason).
    y_true_w = pose_w - pose_w[:, 0:1, :]

    if verbose:
        log.printc(log.WARNING, 'Avg. mm. error:')

    lower_err = np.inf
    scores = []

    for b in range(nb_blocks):

        y_pred = pred[b]
        y_pred_w = pred_to_world_tf(y_pred, y_true_w, afmat, scam, rootz)

        err_w = mean_distance_error(y_true_w[:, 0:, :], y_pred_w[:, 0:, :])
        scores.append(err_w)
        if verbose:
            log.printc(log.WARNING, ' %.1f' % err_w)

        # Keep the best prediction
        if err_w < lower_err:
            lower_err = err_w

    if verbose:
        log.printcn('', '')
        # NOTE: despite the wording this reports the minimum (best block),
        # not an average — kept identical to eval_human36m_sc_error
        log.printcn(log.WARNING, 'Final averaged error (mm): %.3f' % lower_err)

    return scores
コード例 #3
0
    def build_callbacks(self, prop, relabel_step=None):
        '''
        Assemble the callback list for one training run.

        prop = proportion of known labels of current run

        Callbacks included:
        - validation callback (only when configured in cfg.CALLBACK.VAL_CB)
        - SaveModel
        - LearningRateScheduler
        '''
        log.printcn(log.OKBLUE, 'Building callbacks')
        callback_list = []

        # Validation callback (dataset-specific: mAP, DICE, ...)
        val_cb_name = cfg.CALLBACK.VAL_CB
        if val_cb_name is None:
            log.printcn(log.WARNING, 'Skipping validation callback')
        else:
            val_cb = self.build_val_cb(val_cb_name,
                                       p=prop,
                                       relabel_step=relabel_step)
            callback_list.append(val_cb)

        # Model checkpointing
        callback_list.append(
            SaveModel(self.exp_folder, prop, relabel_step=relabel_step))

        # Learning rate schedule
        callback_list.append(LearningRateScheduler(lr_scheduler))

        return callback_list
コード例 #4
0
ファイル: eval_baseline_h36m.py プロジェクト: lcalem/main-pe
def eval_baseline_h36m():
    '''
    Evaluate a pose-only baseline model on the Human3.6M validation split.

    The weights path is read from the command line (first positional arg).
    Prints the per-block scores.
    '''
    weights_path = sys.argv[1]  # bug fix: `sys.args` does not exist
    nb_blocks = 1  # TODO retrieve from config
    eval_model = MultiBranchModel(dim=3, n_joints=17, nb_pose_blocks=nb_blocks)
    eval_model.load_weights(weights_path, pose_only=True)

    # local loading
    local_h36m_path = '/home/caleml/datasets/h36m'
    local_h36m = Human36M(local_h36m_path,
                          dataconf=config.human36m_dataconf,
                          poselayout=pose_format.pa17j3d,
                          topology='frames')

    # whole validation split as a single batch
    h36m_val = BatchLoader(local_h36m, ['frame'],
                           ['pose_w', 'pose_uvd', 'afmat', 'camera'],
                           VALID_MODE,
                           batch_size=local_h36m.get_length(VALID_MODE),
                           shuffle=True)

    log.printcn(log.OKBLUE, 'Preloading Human3.6M validation samples...')

    [x_val], [pw_val, puvd_val, afmat_val, scam_val] = h36m_val[0]

    # eval_human36m_sc_error expects num_blocks right after the model; the
    # original call omitted it, shifting every following positional argument
    # by one.  pose_only=True matches the pose-only weights loaded above.
    scores = eval_human36m_sc_error(eval_model.model,
                                    nb_blocks,
                                    x_val,
                                    pw_val,
                                    afmat_val,
                                    puvd_val[:, 0, 2],
                                    scam_val,
                                    pose_only=True,
                                    batch_size=24)

    pprint(scores)
コード例 #5
0
ファイル: common.py プロジェクト: lcalem/main-pe
def lr_scheduler(epoch, lr):
    '''Halve the learning rate at epochs 20 and 30, otherwise keep it unchanged.'''
    if epoch not in (20, 30):
        log.printcn(log.OKBLUE, 'lr_scheduler: lr %g @ %d' % (lr, epoch))
        return lr

    newlr = 0.5 * lr
    log.printcn(log.WARNING,
                'lr_scheduler: lr %g -> %g @ %d' % (lr, newlr, epoch))
    return newlr
コード例 #6
0
ファイル: scheduler.py プロジェクト: lcalem/partial-labels
def lr_scheduler(epoch, lr):
    '''Scale the learning rate by cfg.CALLBACK.LR_FACTOR at the configured trigger epochs.'''
    if epoch not in cfg.CALLBACK.LR_TRIGGER:
        log.printcn(log.OKBLUE, 'lr_scheduler: lr %g @ %d' % (lr, epoch))
        return lr

    newlr = cfg.CALLBACK.LR_FACTOR * lr
    log.printcn(log.WARNING,
                'lr_scheduler: lr %g -> %g @ %d' % (lr, newlr, epoch))
    return newlr
コード例 #7
0
ファイル: base.py プロジェクト: lcalem/partial-labels
    def finish_step(self, relabel_step):
        '''
        Append this step's relabeling statistics to the CSV log and close
        the per-step relabel file.
        '''
        assert relabel_step == self.relabel_step

        log_path = os.path.join(self.exp_folder, 'relabeling',
                                'log_relabeling.csv')
        stats = (self.p, self.relabel_step, self.total_added,
                 self.positive_added, self.negative_added)
        with open(log_path, 'a') as log_file:
            log_file.write('%s,%s,%s,%s,%s\n' % stats)

        msg = '\tAdded %s labels during relabeling, logging into %s' % (
            self.total_added, log_path)
        log.printcn(log.OKBLUE, msg)
        log.printcn(log.OKBLUE, '\tNew dataset path %s' % (self.targets_path))

        self.f_relabel.close()
コード例 #8
0
    def launch_percentage_relabel(self, p):
        '''
        Run the full relabeling loop for a given known-label percentage p:

        1. load train / test datasets
        2. build model and relabelator
        3. for each relabel step: callbacks, training, relabeling
        4. release the Keras session
        '''
        self.dataset_train = self.load_dataset(mode=cfg.DATASET.TRAIN,
                                               batch_size=cfg.BATCH_SIZE,
                                               p=p)
        self.dataset_test = self.load_dataset(mode=cfg.DATASET.TEST,
                                              batch_size=cfg.TEST_BATCH_SIZE)

        # model + relabelator
        nb_classes = self.dataset_train.nb_classes
        self.build_model(nb_classes, p)
        self.relabelator = self.load_relabelator(p, nb_classes)

        for step in range(cfg.RELABEL.STEPS):
            log.printcn(log.OKBLUE, '\nDoing relabel step %s' % (step))

            # callbacks
            cb_list = self.build_callbacks(p, relabel_step=step)

            # per-step epoch count, falling back to the global setting
            if cfg.RELABEL.EPOCHS is None:
                n_epochs = cfg.TRAINING.N_EPOCHS
            else:
                n_epochs = cfg.RELABEL.EPOCHS[step]

            # explicit config value wins, otherwise one pass over the dataset
            steps_per_epoch = cfg.TRAINING.STEPS_PER_EPOCH or len(
                self.dataset_train)

            # actual training
            self.model.train(self.dataset_train,
                             steps_per_epoch=steps_per_epoch,
                             cb_list=cb_list,
                             n_epochs=n_epochs,
                             dataset_val=self.dataset_test)

            # relabeling
            self.relabel_dataset(step)

        # cleaning (to release memory before next launch)
        K.clear_session()
        del self.model
コード例 #9
0
    def finish_step(self, relabel_step):
        '''
        Append this step's TP/FP relabeling stats to the CSV log and point
        targets_path at the newly relabeled annotations.
        '''
        # log relabelling stats
        log_path = os.path.join(self.exp_folder, 'relabeling',
                                'log_relabeling.csv')
        os.makedirs(os.path.dirname(log_path), exist_ok=True)
        row = '{},{},{},{}\n'.format(self.p, relabel_step, self.TP, self.FP)
        with open(log_path, 'a') as log_file:
            log_file.write(row)

        msg = '\tAdded {} TP and {} FP, logging into {}'.format(
            self.TP, self.FP, log_path)
        log.printcn(log.OKBLUE, msg)

        # Setting the path to the new targets; the literal '{}' component is
        # presumably filled in later by the dataset loader — verify at caller
        self.targets_path = os.path.join(self.data_dir,
                                         'relabeled_annotations',
                                         self.exp_name, '{}', str(self.p),
                                         str(relabel_step))
コード例 #10
0
    def build_val_cb(self, cb_name, p, relabel_step=None):
        '''
        Instanciate the validation callback matching cb_name.
        Different datasets require different validations, like mAP, DICE, etc.

        Raises Exception for an unknown cb_name.
        '''
        if cb_name != 'map':
            raise Exception('Invalid validation callback %s' % cb_name)

        log.printcn(log.OKBLUE, 'loading mAP callback')
        X_test, Y_test = self.dataset_test[0]
        return MAPCallback(X_test,
                           Y_test,
                           self.exp_folder,
                           p,
                           relabel_step=relabel_step)
コード例 #11
0
def exp_init(cmd, exps_folder=None, exp_name=None):
    '''
    Common actions for setuping an experiment:
    - create experiment folder
    - dump config in it
    - create cmd file
    - dump current model code in it (because for now we only save weights)

    Returns the created experiment folder path.
    '''
    if exps_folder is None:
        exps_folder = os.path.join(os.environ['HOME'], 'prior_experiments')

    # experiment folder, timestamped for uniqueness
    stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M")
    name_suffix = ('_%s' % exp_name) if exp_name else ''
    model_folder = '%s/exp_%s_%s%s' % (exps_folder, stamp, cfg.ARCHI.NAME,
                                       name_suffix)
    os.makedirs(model_folder)

    # total epoch count, either global or summed over relabel steps
    if cfg.RELABEL.EPOCHS is None:
        n_epochs = cfg.TRAINING.NB_EPOCH
    else:
        n_epochs = sum(cfg.RELABEL.EPOCHS)
    log.printcn(
        log.OKBLUE, "Conducting experiment for %s epochs in folder %s" %
        (n_epochs, model_folder))

    # config dump
    with open(os.path.join(model_folder, 'config.yaml'), 'w+') as f_conf:
        yaml.dump(cfg, f_conf, default_flow_style=False)

    # command-line dump
    with open(os.path.join(model_folder, 'cmd.txt'), 'w+') as f_cmd:
        f_cmd.write(cmd + '\n')

    # snapshot of the model source code
    src_folder = os.path.dirname(os.path.realpath(__file__)) + '/..'
    dst_folder = os.path.join(model_folder, 'model_src/')
    shutil.copytree(src_folder,
                    dst_folder,
                    ignore=shutil.ignore_patterns('.*'))

    return model_folder
コード例 #12
0
def launch_training(dataset_path, model_folder, n_epochs, batch_size,
                    pose_blocks):
    '''
    Train a pose-only MultiBranchModel on Human3.6M, with a H36M evaluation
    callback run at every epoch (logs written into model_folder).
    '''
    # dataset
    h36m = Human36M(dataset_path,
                    dataconf=config.human36m_dataconf,
                    poselayout=pose_format.pa17j3d,
                    topology='frames')

    # training data: one 'pose' target per prediction block
    train_data = BatchLoader(h36m, ['frame'], ['pose'] * pose_blocks,
                             TRAIN_MODE,
                             batch_size=batch_size,
                             shuffle=True)

    # validation: the whole split as a single batch
    val_data = BatchLoader(h36m, ['frame'],
                           ['pose_w', 'pose_uvd', 'afmat', 'camera'],
                           VALID_MODE,
                           batch_size=h36m.get_length(VALID_MODE),
                           shuffle=True)

    log.printcn(log.OKBLUE, 'Preloading Human3.6M validation samples...')
    [x_val], [pw_val, puvd_val, afmat_val, scam_val] = val_data[0]
    eval_callback = callbacks.H36MEvalCallback(pose_blocks,
                                               x_val,
                                               pw_val,
                                               afmat_val,
                                               puvd_val[:, 0, 2],
                                               scam_val,
                                               pose_only=True,
                                               logdir=model_folder)

    # model
    model = MultiBranchModel(dim=3, n_joints=17, nb_pose_blocks=pose_blocks)
    model.build_pose_only()
    model.add_callback(eval_callback)

    model.train(train_data,
                steps_per_epoch=len(train_data),
                model_folder=model_folder,
                n_epochs=n_epochs)
コード例 #13
0
    def relabel_dataset(self, relabel_step):
        '''
        Relabel the training set with the current model:
        - run model predictions over every training batch
        - let the relabelator build a new relabeled csv dataset
        - point the training dataset at the created targets
        '''
        log.printcn(log.OKBLUE,
                    '\nDoing relabeling inference step %s' % relabel_step)

        self.relabelator.init_step(relabel_step)

        # predict batch by batch and feed the relabelator
        for batch_idx in range(len(self.dataset_train)):
            x_batch, y_batch = self.dataset_train[batch_idx]
            y_pred = self.model.predict(x_batch)  # TODO not the logits!!!!!!!!
            self.relabelator.relabel(x_batch, y_batch, y_pred)

        self.relabelator.finish_step(relabel_step)

        # update dataset with the freshly written targets
        self.dataset_train.update_targets(self.relabelator.targets_path)
コード例 #14
0
        help=
        'the specific percentage of known labels. When not specified all percentages are sequentially launched'
    )
    parser.add_argument('--exp_name', '-n', help='optional experiment name')
    parser.add_argument(
        '--initial_weights',
        '-w',
        help='optional path to the weights that should be loaded')

    # options management: the YAML options file overrides the default config
    args = parser.parse_args()
    options = config_utils.parse_options_file(args.options)
    config_utils.update_config(options)

    # init: experiment folder named after the explicit name, falling back to
    # the options file name; the full command line is archived inside it
    exp_folder = utils.exp_init(' '.join(sys.argv),
                                exp_name=(args.exp_name or args.options))

    # restrict visible GPUs to the one(s) requested on the command line
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    try:
        launcher = Launcher(exp_folder,
                            percent=args.percent,
                            initial_weights=args.initial_weights)
        launcher.launch()
    finally:
        # cleanup if needed (test folders) — runs even when launch() raises
        if cfg.CLEANUP is True:
            log.printcn(log.OKBLUE, 'Cleaning folder %s' % (exp_folder))
            shutil.rmtree(exp_folder)
コード例 #15
0
ファイル: common.py プロジェクト: lcalem/main-pe
    def launch(self):
        '''
        main entrypoint
        1. training data
        2. validation data
        3. callbacks
        4. model building
        5. launch actual training
        '''
        dataset = Human36M(self.dataset_path,
                           dataconf=config.human36m_dataconf,
                           poselayout=pose_format.pa17j3d,
                           topology='frames')

        # training data
        train_loader = BatchLoader(dataset, ['frame'],
                                   self.get_h36m_outputs(),
                                   TRAIN_MODE,
                                   batch_size=self.batch_size,
                                   shuffle=True)

        # validation data, whole split as one batch
        val_loader = BatchLoader(dataset, ['frame'],
                                 ['pose_w', 'pose_uvd', 'afmat', 'camera'],
                                 VALID_MODE,
                                 batch_size=dataset.get_length(VALID_MODE),
                                 shuffle=True)

        log.printcn(log.OKBLUE, 'Preloading Human3.6M validation samples...')
        [x_val], [pw_val, puvd_val, afmat_val, scam_val] = val_loader[0]
        # assert rootz == puvd_val[:,0,2]

        # per-epoch Human3.6M evaluation
        eval_callback = callbacks.H36MEvalCallback(self.pose_blocks,
                                                   x_val,
                                                   pw_val,
                                                   afmat_val,
                                                   puvd_val[:, 0, 2],
                                                   scam_val,
                                                   pose_only=self.pose_only,
                                                   logdir=self.model_folder)

        # tensorboard, logged under the experiment's folder name
        exp_name = self.model_folder.split('/')[-1]
        logs_folder = os.environ['HOME'] + '/pe_experiments/tensorboard/' + exp_name
        print('Tensorboard log folder %s' % logs_folder)
        tensorboard = TensorBoard(
            log_dir=os.path.join(logs_folder, 'tensorboard'))

        callback_list = [
            tensorboard,
            eval_callback,
            LearningRateScheduler(lr_scheduler),
            callbacks.SaveModel(self.model_folder),
        ]

        # model
        self.build_model()

        # train
        self.model.train(train_loader,
                         steps_per_epoch=len(train_loader),
                         model_folder=self.model_folder,
                         n_epochs=self.n_epochs,
                         cb_list=callback_list,
                         n_workers=self.nb_workers)
コード例 #16
0
ファイル: __init__.py プロジェクト: lcalem/partial-labels
 def log(self, msg):
     '''Print msg through the project logger when verbose mode is on.'''
     if not self.verbose:
         return
     log.printcn(log.HEADER, msg)
コード例 #17
0
ファイル: common.py プロジェクト: lcalem/main-pe
    def build_model(self):
        '''
        Instanciate self.model according to self.exp_type and build it.

        Raises Exception for an unknown exp_type.
        '''
        # kwargs shared by every model type
        common = dict(dim=self.dim,
                      n_joints=self.nb_joints,
                      nb_pose_blocks=self.pose_blocks)

        exp_type = self.exp_type

        # baseline is the only type built pose-only
        if exp_type == 'baseline':
            self.model = MultiBranchModel(**common)
            self.model.build_pose_only()
            return

        if exp_type == 'hybrid':
            self.model = MultiBranchModel(**common)

        elif exp_type == 'hybrid_reduced':
            assert isinstance(self.zp_depth,
                              int), 'wrong zp_depth %s' % self.zp_depth
            log.printcn(
                log.OKBLUE,
                'launching hybrid_reduced model with zp_depth = %s' %
                self.zp_depth)
            self.model = MultiBranchReduced(zp_depth=self.zp_depth, **common)

        elif exp_type == 'hybrid_vgg':
            self.model = MultiBranchVGGModel(**common)

        elif exp_type == 'hybrid_stop':
            self.model = MultiBranchStopped(zp_depth=self.zp_depth, **common)

        elif exp_type == 'hybrid_r_bb':
            self.model = MBMReducedBB(zp_depth=self.zp_depth, **common)

        elif exp_type == 'cycle':
            self.model = CycleModel(**common)

        elif exp_type == 'cycle_reduced':
            self.model = CycleReduced(**common)

        elif exp_type == 'cycle_r_bb':
            self.model = CycleReducedBB(cut_zp=self.cut_zp, **common)

        else:
            raise Exception('Unknown exp type %s' % self.exp_type)

        self.model.build()
コード例 #18
0
def eval_human36m_sc_error(model,
                           num_blocks,
                           x,
                           pose_w,
                           afmat,
                           rootz,
                           scam,
                           pose_only=False,
                           resol_z=2000.,
                           batch_size=8,
                           logdir=None,
                           verbose=True):
    '''
    Predict poses for x and measure the per-block mean distance error (mm)
    against the ground-truth world poses.

    model: model whose predict() returns one pose output per block
           (preceded by an image output when pose_only is False)
    num_blocks: number of pose prediction blocks in the model
    x: input frames
    pose_w: ground-truth poses in world coordinates (millimeters)
    afmat: transformation matrices mapping crop uvd coords back to the
           original-image uvd coords
    rootz: absolute depth of the root joint (origin of the z axis)
    scam: serialized cameras, deserialized per sample for back-projection
    pose_only: when False, pred[0] is the image output and pose outputs
               start at index 1
    resol_z: depth range in mm used to denormalize the predicted z
    batch_size: predict() batch size
    logdir: when set, predicted / true world poses are dumped there as .npy
    verbose: print per-block errors as they are computed

    Returns the list of per-block errors.  NOTE: the "final" value logged
    at the end is the minimum over blocks, not an average.
    '''
    assert len(x) == len(pose_w) == len(afmat) == len(scam)

    # copy so the root-centering below does not mutate the caller's array
    y_true_w = pose_w.copy()
    
    y_pred_w = np.zeros((num_blocks,) + y_true_w.shape)
    if rootz.ndim == 1:
        # make rootz broadcastable against the (batch, joints) z plane
        rootz = np.expand_dims(rootz, axis=-1)

    pred = model.predict(x, batch_size=batch_size, verbose=1)

    # Move the root joints from GT poses to origin
    y_true_w -= y_true_w[:, 0:1, :]

    if verbose:
        log.printc(log.WARNING, 'Avg. mm. error:')

    lower_err = np.inf
    scores = []

    for b in range(num_blocks):

        if pose_only:
            y_pred = pred[b]
        else:
            y_pred = pred[b + 1]  # first output is image and pose output start after

        # take only the spatial dims
        y_pred = y_pred[:, :, 0:3]

        # project normalized coordiates to the image plane
        # (in-place on a view of pred[b]: the prediction array is modified)
        y_pred[:, :, 0:2] = transform.transform_pose_sequence(afmat.copy(), y_pred[:, :, 0:2], inverse=True)

        # Recover the absolute Z from the normalized [0, 1] depth
        y_pred[:, :, 2] = (resol_z * (y_pred[:, :, 2] - 0.5)) + rootz
        y_pred_uvd = y_pred[:, :, 0:3]

        # camera inverse projection: uvd (pixels + mm depth) -> world mm
        for j in range(len(y_pred_uvd)):
            cam = camera.camera_deserialize(scam[j])
            y_pred_w[b, j, :, :] = cam.inverse_project(y_pred_uvd[j])

        # Move the root joint from predicted poses to the origin
        y_pred_w[b, :, :, :] -= y_pred_w[b, :, 0:1, :]

        err_w = mean_distance_error(y_true_w[:, 0:, :], y_pred_w[b, :, 0:, :])
        scores.append(err_w)
        if verbose:
            log.printc(log.WARNING, ' %.1f' % err_w)

        # Keep the best prediction
        if err_w < lower_err:
            lower_err = err_w

    if verbose:
        log.printcn('', '')

    if logdir is not None:
        np.save('%s/y_pred_w.npy' % logdir, y_pred_w)
        np.save('%s/y_true_w.npy' % logdir, y_true_w)

    # despite the wording, this reports the best (minimum) block error
    log.printcn(log.WARNING, 'Final averaged error (mm): %.3f' % lower_err)

    return scores