def prepare_training(pose_trainable, lr):
    """Compile the split models and build the end-of-epoch training callback.

    Args:
        pose_trainable: whether the pose-estimation layers are trainable
            in the compiled split models.
        lr: initial learning rate for the RMSprop optimizer.

    Returns:
        (end_of_epoch_callback, models): the per-epoch callback (checkpoints
        the model, runs the MPII and PennAction evaluations, and applies a
        step learning-rate decay) and the list of compiled split models.
    """
    optimizer = RMSprop(lr=lr)
    models = compile_split_models(full_model, cfg, optimizer,
                                  pose_trainable=pose_trainable,
                                  ar_loss_weights=action_weight,
                                  copy_replica=cfg.pose_replica)
    full_model.summary()

    # Validation callbacks: the pose model (models[0]) is evaluated on MPII,
    # the action model (models[1]) on PennAction test data.
    mpii_callback = MpiiEvalCallback(x_val, p_val, afmat_val, head_val,
                                     eval_model=models[0], pred_per_block=1,
                                     batch_size=1, logdir=logdir)
    penn_callback = PennActionEvalCallback(penn_te, eval_model=models[1],
                                           logdir=logdir)

    def end_of_epoch_callback(epoch):
        # Checkpoint first, then evaluate on both validation sets.
        save_model.on_epoch_end(epoch)
        mpii_callback.on_epoch_end(epoch)
        penn_callback.on_epoch_end(epoch)
        # Step decay: divide the learning rate by 10 at the listed epochs.
        if epoch in (15, 25):
            current_lr = float(K.get_value(optimizer.lr))
            decayed_lr = 0.1 * current_lr
            K.set_value(optimizer.lr, decayed_lr)
            printcn(WARNING, 'lr_scheduler: lr %g -> %g @ %d'
                    % (current_lr, decayed_lr, epoch))

    return end_of_epoch_callback, models
poselayout=pa17j3d, topology='frames', use_gt_bbox=True)
# NOTE(review): the line above is the tail of a constructor call that starts
# before this visible chunk (presumably the PennAction single-frame dataset,
# by analogy with ntu_sf below) — confirm against the full file.

# NTU dataset in single-frame topology, using ground-truth bounding boxes.
ntu_sf = Ntu(datasetpath('NTU'), ntu_pe_dataconf,
        poselayout=pa17j3d, topology='frames', use_gt_bbox=True)

"""Create an object to load data from all datasets."""
# Training loader mixes MPII, PennAction and NTU frames; each dataset gets
# its own batch size and only the 'pose' output is supervised here.
data_tr = BatchLoader([mpii, penn_sf, ntu_sf], ['frame'], ['pose'],
        TRAIN_MODE,
        batch_size=[batch_size_mpii, batch_size_ar, batch_size_ar],
        num_predictions=num_predictions, shuffle=True)

"""MPII validation samples."""
# One single batch holding the entire MPII validation split, so mpii_val[0]
# below yields all samples at once.
# NOTE(review): shuffle=True on a validation loader is unusual (a sibling
# script uses shuffle=False) — verify this is intentional.
mpii_val = BatchLoader(mpii, ['frame'], ['pose', 'afmat', 'headsize'],
        VALID_MODE, batch_size=mpii.get_length(VALID_MODE), shuffle=True)
printcn(OKBLUE, 'Pre-loading MPII validation data...')
# Materialize the whole validation split in memory: inputs and the pose /
# affine-matrix / head-size targets used by the evaluation callback.
[x_val], [p_val, afmat_val, head_val] = mpii_val[0]
mpii_callback = MpiiEvalCallback(x_val, p_val, afmat_val, head_val,
        map_to_pa16j=pa17j3d.map_to_pa16j, logdir=logdir)

# """Human3.6H validation samples."""
# h36m_val = BatchLoader(h36m, ['frame'],
#         ['pose_w', 'pose_uvd', 'afmat', 'camera', 'action'], VALID_MODE,
#         batch_size=h36m.get_length(VALID_MODE), shuffle=True)
# printcn(OKBLUE, 'Preloading Human3.6M validation samples...')
# [x_val], [pw_val, puvd_val, afmat_val, scam_val, action] = h36m_val[0]
#
# h36m_callback = H36MEvalCallback(x_val, pw_val, afmat_val,
#         puvd_val[:,0,2], scam_val, action, logdir=logdir)

# Build the pose-regression network and compile it with a combined
# L1/L2/binary-crossentropy loss (second argument is presumably a loss
# weighting factor — confirm against pose_regression_loss).
model = spnet.build(cfg)
loss = pose_regression_loss('l1l2bincross', 0.01)
model.compile(loss=loss, optimizer=RMSprop(lr=start_lr))
num_predictions=num_blocks, shuffle=True)
# NOTE(review): the line above is the tail of a loader/constructor call that
# starts before this visible chunk — confirm against the full file.

"""Pre-load validation samples and generate the eval. callback."""
# One single batch holding the entire MPII validation split (no shuffling),
# so mpii_val[0] below yields all samples at once.
mpii_val = BatchLoader(mpii, x_dictkeys=['frame'],
        y_dictkeys=['pose', 'afmat', 'headsize'], mode=VALID_MODE,
        batch_size=mpii.get_length(VALID_MODE), num_predictions=1,
        shuffle=False)
printcn(OKBLUE, 'Pre-loading MPII validation data...')
# Materialize the whole validation split in memory: inputs and the pose /
# affine-matrix / head-size targets used by the evaluation callback.
[x_val], [p_val, afmat_val, head_val] = mpii_val[0]
eval_callback = MpiiEvalCallback(x_val, p_val, afmat_val, head_val,
        eval_model=model, batch_size=2, pred_per_block=1, logdir=logdir)

# Compile with a combined L1/L2/binary-crossentropy pose loss (second
# argument is presumably a loss weighting factor — confirm against
# pose_regression_loss) and default RMSprop settings.
loss = pose_regression_loss('l1l2bincross', 0.01)
model.compile(loss=loss, optimizer=RMSprop())
model.summary()

def lr_scheduler(epoch, lr):
    # Step decay: multiply the learning rate by 0.2 at the listed epochs.
    # NOTE(review): this chunk is truncated — the 'else:' branch (and the
    # function's return) continues beyond the visible source.
    if epoch in [80, 100]:
        newlr = 0.2 * lr
        printcn(WARNING, 'lr_scheduler: lr %g -> %g @ %d'
                % (lr, newlr, epoch))
    else:
def prepare_training(pose_trainable, lr): optimizer = SGD(lr=lr, momentum=0.9, nesterov=True) # optimizer = RMSprop(lr=lr) models = compile_split_models(full_model, cfg, optimizer, pose_trainable=pose_trainable, ar_loss_weights=action_weight, copy_replica=cfg.pose_replica) full_model.summary() """Create validation callbacks.""" mpii_callback = MpiiEvalCallback(mpii_x_val, mpii_p_val, mpii_afmat_val, mpii_head_val, eval_model=models[0], pred_per_block=1, map_to_pa16j=pa17j3d.map_to_pa16j, batch_size=1, logdir=logdir) h36m_callback = H36MEvalCallback(h36m_x_val, h36m_pw_val, h36m_afmat_val, h36m_puvd_val[:, 0, 2], h36m_scam_val, h36m_action, batch_size=1, eval_model=models[0], logdir=logdir) ntu_callback = NtuEvalCallback(ntu_te, eval_model=models[1], logdir=logdir) def end_of_epoch_callback(epoch): save_model.on_epoch_end(epoch) y_actu = [] y_pred = [] predictions = [] printcn(OKBLUE, 'Validation on Benset') for i in range(len(benset_dataloader.get_val_data_keys())): #printc(OKBLUE, '%04d/%04d\t' % (i, len(val_data_keys))) x, y = benset_val_batchloader.__next__() prediction = full_model.predict(x) pred_action = np.argmax(prediction[11]) annot_action = np.argmax(y[0]) y_actu.append(annot_action) y_pred.append(pred_action) if pred_action == annot_action: predictions.append(1) else: predictions.append(0) accuracy = 100.0 / len(predictions) * Counter(predictions)[1] conf_mat = confusion_matrix(y_actu, y_pred) printcn(OKBLUE, '') printcn(OKBLUE, 'Accuracy: %d' % accuracy) print(conf_mat) logarray[epoch] = accuracy with open(os.path.join(logdir, 'benset_val.json'), 'w') as f: json.dump(logarray, f) # if epoch == 0 or epoch >= 50: # mpii_callback.on_epoch_end(epoch) # h36m_callback.on_epoch_end(epoch) #ntu_callback.on_epoch_end(epoch) if epoch in [25, 31]: lr = float(K.get_value(optimizer.lr)) newlr = 0.1 * lr K.set_value(optimizer.lr, newlr) printcn(WARNING, 'lr_scheduler: lr %g -> %g @ %d' \ % (lr, newlr, epoch)) return end_of_epoch_callback, models