Example #1
# Assumes the enclosing script defines full_model, cfg, action_weight,
# logdir, save_model and the MPII / Penn Action validation arrays, and
# imports RMSprop, K (keras.backend), printcn and WARNING.
def prepare_training(pose_trainable, lr):
    optimizer = RMSprop(lr=lr)
    models = compile_split_models(full_model, cfg, optimizer,
            pose_trainable=pose_trainable, ar_loss_weights=action_weight,
            copy_replica=cfg.pose_replica)
    full_model.summary()

    """Create validation callbacks."""
    mpii_callback = MpiiEvalCallback(x_val, p_val, afmat_val, head_val,
            eval_model=models[0], pred_per_block=1, batch_size=1, logdir=logdir)
    penn_callback = PennActionEvalCallback(penn_te, eval_model=models[1],
            logdir=logdir)

    def end_of_epoch_callback(epoch):

        # Checkpoint the model, then run both evaluation callbacks.
        save_model.on_epoch_end(epoch)
        mpii_callback.on_epoch_end(epoch)
        penn_callback.on_epoch_end(epoch)

        # Step decay: divide the learning rate by 10 at epochs 15 and 25.
        if epoch in [15, 25]:
            lr = float(K.get_value(optimizer.lr))
            newlr = 0.1 * lr
            K.set_value(optimizer.lr, newlr)
            printcn(WARNING, 'lr_scheduler: lr %g -> %g @ %d'
                    % (lr, newlr, epoch))

    return end_of_epoch_callback, models
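
A minimal sketch of how this helper might be driven from the surrounding
training script; the epoch count, train_generator and steps_per_epoch below
are assumptions for illustration, not part of the original code:

end_of_epoch_callback, models = prepare_training(pose_trainable=True, lr=0.001)
for epoch in range(30):
    # Train each split model (pose, then action) for one epoch.
    for model in models:
        model.fit_generator(train_generator,
                steps_per_epoch=steps_per_epoch, epochs=1, verbose=1)
    # Checkpoint, evaluate on MPII / Penn Action, and apply the LR schedule.
    end_of_epoch_callback(epoch)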
Example #2
# Same helper with SGD + Nesterov momentum in place of RMSprop, evaluating
# only on NTU (the MPII and H36M callbacks are left commented out).
def prepare_training(pose_trainable, lr):
    optimizer = SGD(lr=lr, momentum=0.9, nesterov=True)
    # optimizer = RMSprop(lr=lr)
    models = compile_split_models(full_model,
                                  cfg,
                                  optimizer,
                                  pose_trainable=pose_trainable,
                                  ar_loss_weights=action_weight,
                                  copy_replica=cfg.pose_replica)
    full_model.summary()
    """Create validation callbacks."""
    # mpii_callback = MpiiEvalCallback(mpii_x_val, mpii_p_val, mpii_afmat_val,
    #         mpii_head_val, eval_model=models[0], pred_per_block=1,
    #         map_to_pa16j=pa17j3d.map_to_pa16j, batch_size=1, logdir=logdir)

    # h36m_callback = H36MEvalCallback(h36m_x_val, h36m_pw_val, h36m_afmat_val,
    #         h36m_puvd_val[:,0,2], h36m_scam_val, h36m_action,
    #         batch_size=1, eval_model=models[0], logdir=logdir)

    ntu_callback = NtuEvalCallback(ntu_te, eval_model=models[1], logdir=logdir)

    def end_of_epoch_callback(epoch):

        save_model.on_epoch_end(epoch)
        # if epoch == 0 or epoch >= 50:
        #     mpii_callback.on_epoch_end(epoch)
        #     h36m_callback.on_epoch_end(epoch)

        ntu_callback.on_epoch_end(epoch)

        if epoch in [58, 70]:
            lr = float(K.get_value(optimizer.lr))
            newlr = 0.1 * lr
            K.set_value(optimizer.lr, newlr)
            printcn(WARNING, 'lr_scheduler: lr %g -> %g @ %d'
                    % (lr, newlr, epoch))

    return end_of_epoch_callback, models
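
The manual drop above is an ordinary step decay. For reference, the same
schedule can be written as a standalone function and attached via
keras.callbacks.LearningRateScheduler; a sketch, assuming Keras 2.1+ where
the schedule also receives the current learning rate:

from keras.callbacks import LearningRateScheduler

def step_decay(epoch, lr, drop_epochs=(58, 70), factor=0.1):
    # Multiply the learning rate by `factor` when a drop epoch is reached.
    return lr * factor if epoch in drop_epochs else lr

lr_callback = LearningRateScheduler(step_decay, verbose=1)

This only applies when training runs through fit / fit_generator with a
callbacks list; the examples here instead step the optimizer by hand inside
end_of_epoch_callback.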
Example #3
# Variant that validates on a custom Benset dataset inside the epoch
# callback. Assumes numpy as np, os, json, collections.Counter and
# sklearn.metrics.confusion_matrix are imported, and that
# benset_dataloader, benset_val_batchloader and logarray are defined.
def prepare_training(pose_trainable, lr):
    optimizer = SGD(lr=lr, momentum=0.9, nesterov=True)
    # optimizer = RMSprop(lr=lr)
    models = compile_split_models(full_model,
                                  cfg,
                                  optimizer,
                                  pose_trainable=pose_trainable,
                                  ar_loss_weights=action_weight,
                                  copy_replica=cfg.pose_replica)
    full_model.summary()
    """Create validation callbacks."""
    mpii_callback = MpiiEvalCallback(mpii_x_val,
                                     mpii_p_val,
                                     mpii_afmat_val,
                                     mpii_head_val,
                                     eval_model=models[0],
                                     pred_per_block=1,
                                     map_to_pa16j=pa17j3d.map_to_pa16j,
                                     batch_size=1,
                                     logdir=logdir)

    h36m_callback = H36MEvalCallback(h36m_x_val,
                                     h36m_pw_val,
                                     h36m_afmat_val,
                                     h36m_puvd_val[:, 0, 2],
                                     h36m_scam_val,
                                     h36m_action,
                                     batch_size=1,
                                     eval_model=models[0],
                                     logdir=logdir)

    ntu_callback = NtuEvalCallback(ntu_te, eval_model=models[1], logdir=logdir)

    def end_of_epoch_callback(epoch):

        save_model.on_epoch_end(epoch)

        # Collect annotated and predicted action labels over the Benset
        # validation split.
        y_actu = []
        y_pred = []
        predictions = []
        printcn(OKBLUE, 'Validation on Benset')
        for i in range(len(benset_dataloader.get_val_data_keys())):
            x, y = next(benset_val_batchloader)
            prediction = full_model.predict(x)

            # Predicted action from the model's 12th output head; the
            # annotated action comes from the label vector y[0].
            pred_action = np.argmax(prediction[11])
            annot_action = np.argmax(y[0])

            y_actu.append(annot_action)
            y_pred.append(pred_action)

            predictions.append(1 if pred_action == annot_action else 0)

        # Percentage of correct predictions over the whole validation set.
        accuracy = 100.0 / len(predictions) * Counter(predictions)[1]

        conf_mat = confusion_matrix(y_actu, y_pred)
        printcn(OKBLUE, '')
        printcn(OKBLUE, 'Accuracy: %.2f' % accuracy)
        print(conf_mat)

        # Keep a per-epoch accuracy log and dump it to JSON after each epoch.
        logarray[epoch] = accuracy

        with open(os.path.join(logdir, 'benset_val.json'), 'w') as f:
            json.dump(logarray, f)

        # if epoch == 0 or epoch >= 50:
        #     mpii_callback.on_epoch_end(epoch)
        #     h36m_callback.on_epoch_end(epoch)

        # ntu_callback.on_epoch_end(epoch)

        if epoch in [25, 31]:
            lr = float(K.get_value(optimizer.lr))
            newlr = 0.1 * lr
            K.set_value(optimizer.lr, newlr)
            printcn(WARNING, 'lr_scheduler: lr %g -> %g @ %d'
                    % (lr, newlr, epoch))

    return end_of_epoch_callback, models
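
Once the label lists are filled, the running tally above can also be replaced
by a single scikit-learn call; a sketch reusing the y_actu / y_pred lists
built in the loop:

from sklearn.metrics import accuracy_score, confusion_matrix

# Equivalent to the Counter-based tally: percentage of matching labels.
accuracy = 100.0 * accuracy_score(y_actu, y_pred)
conf_mat = confusion_matrix(y_actu, y_pred)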