def test_tocategorical_predP_user(predP_batB_seqL1_cls3_user_cat):
    y, Y, nc = [predP_batB_seqL1_cls3_user_cat[k] for k in ['y', 'Y', 'nclasses']]

    assert_almost_equal(nu.to_categorical(y, nclasses=nc), Y)

    # the default nclasses can only be inferred when the top label is present
    if y.max() + 1 == nc:
        assert_almost_equal(nu.to_categorical(y), Y)

def test_tocategorical_trues_generic(batB_seqL1_cls3_trues_generic_cat):
    y, Y, nc = [batB_seqL1_cls3_trues_generic_cat[k] for k in ['y', 'Y', 'nclasses']]

    assert_almost_equal(nu.to_categorical(y, nclasses=nc), Y)

    # the default nclasses can only be inferred when the top label is present
    if y.max() + 1 == nc:
        assert_almost_equal(nu.to_categorical(y), Y)

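# For reference, a minimal sketch of the one-hot encoding the tests above
# assume `nu.to_categorical` performs (an illustration under assumptions,
# not the library code): labels of shape (...,) map to one-hot vectors of
# shape (..., nclasses), with nclasses inferred as y.max() + 1 when omitted.
def _tocategorical_sketch(y, nclasses=None):
    y = np.asarray(y, dtype=int)
    if nclasses is None:
        nclasses = y.max() + 1  # assumed inference rule
    return np.eye(nclasses)[y]  # index rows of the identity matrix
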
def pred1_batB_seqlQ_cls3_preds_confmat(
        request, base_labels_cls3, batB_seqlQ_cls3_trues_confmat
):  # yapf: disable
    i = request.param
    yp = [base_labels_cls3[ii] for ii in i]

    nclasses = batB_seqlQ_cls3_trues_confmat['nclasses']
    Yp = nu.to_categorical(yp, nclasses)

    yt = batB_seqlQ_cls3_trues_confmat['yt']
    Yt = batB_seqlQ_cls3_trues_confmat['Yt']

    confmat, confrecall, confprecision = [], [], []
    for b, ybb in enumerate(yp):
        _confmat = ext_confusionmatrix(yt[b, ...], ybb, labels=np.arange(nclasses))

        # recall: each row (true class) normalized by its total
        _confrecall = _confmat / (_confmat.sum(axis=1))[:, np.newaxis]

        # precision: each column (predicted class) normalized by its total
        _confprecision = (_confmat.T / (_confmat.sum(axis=0))[:, np.newaxis]).T

        confmat.append(_confmat)
        confrecall.append(_confrecall)
        confprecision.append(_confprecision)

    confmat = np.array(confmat)
    confrecall = np.array(confrecall)
    confprecision = np.array(confprecision)

    yp = np.array(yp)

    # Predictor=1, Batchsize=B, SequenceLength=Q, ClassLabel=1(implicit)
    ytg = batB_seqlQ_cls3_trues_confmat['ytg']
    Ytg = batB_seqlQ_cls3_trues_confmat['Ytg']
    ypg = yp[np.newaxis, ...]
    Ypg = nu.to_categorical(ypg, nclasses)

    # Predictor=1, Batchsize=B, SequenceLength=1(sumaxis),
    # ClassLabel=(nclasses, nclasses)(implicit)
    confmatg = confmat[np.newaxis, :, np.newaxis, ...]
    confrecallg = confrecall[np.newaxis, :, np.newaxis, ...]
    confprecisiong = confprecision[np.newaxis, :, np.newaxis, ...]

    return {
        'yt': yt,
        'Yt': Yt,
        'yp': yp,
        'Yp': Yp,
        'confmat': confmat,
        'confrecall': confrecall,
        'confprecision': confprecision,
        'ytg': ytg,
        'Ytg': Ytg,
        'ypg': ypg,
        'Ypg': Ypg,
        'confmatg': confmatg,
        'confrecallg': confrecallg,
        'confprecisiong': confprecisiong,
    }

def test_tocategorical_predP_batB_seqLQ_generic(predP_batB_seqlQ_cls3_generic_cat):
    y, Y, nc = [predP_batB_seqlQ_cls3_generic_cat[k] for k in ['y', 'Y', 'nclasses']]
    print(y.shape, Y.shape, nu.to_categorical(y, nc).shape)

    assert_almost_equal(nu.to_categorical(y, nclasses=nc), Y)

    # the default nclasses can only be inferred when the top label is present
    if y.max() + 1 == nc:
        assert_almost_equal(nu.to_categorical(y), Y)

def batB_seql1_cls3_trues_confmat(request, base_labels_cls3):
    i = request.param
    y_user = base_labels_cls3[i]

    nclasses = 3
    Y_user = nu.to_categorical(y_user, nclasses)

    # Batchsize=B, SequenceLength=1, ClassLabel=1(implicit)
    y_generic = y_user[:, np.newaxis]
    Y_generic = nu.to_categorical(y_generic, nclasses)

    return {
        'yt': y_user,
        'Yt': Y_user,
        'nclasses': nclasses,
        'ytg': y_generic,
        'Ytg': Y_generic
    }

def pred1_batB_seql1_cls3_preds_confmat(
        request, base_labels_cls3, batB_seql1_cls3_trues_confmat
):  # yapf: disable
    i = request.param
    yp = base_labels_cls3[i]

    nclasses = batB_seql1_cls3_trues_confmat['nclasses']
    Yp = nu.to_categorical(yp, nclasses)

    yt = batB_seql1_cls3_trues_confmat['yt']
    Yt = batB_seql1_cls3_trues_confmat['Yt']

    confmat = ext_confusionmatrix(yt, yp, labels=np.arange(nclasses))
    confrecall = confmat / (confmat.sum(axis=1))[:, np.newaxis]
    confprecision = (confmat.T / (confmat.sum(axis=0))[:, np.newaxis]).T

    # Predictor=1, Batchsize=B, SequenceLength=1, ClassLabel=1(implicit)
    ytg = batB_seql1_cls3_trues_confmat['ytg']
    Ytg = batB_seql1_cls3_trues_confmat['Ytg']
    ypg = yp[np.newaxis, :, np.newaxis]
    Ypg = nu.to_categorical(ypg, nclasses)

    # Predictor=1, Batchsize=1(sumaxis), SequenceLength=1,
    # ClassLabel=(nclasses, nclasses)(implicit)
    confmatg = confmat[np.newaxis, np.newaxis, np.newaxis, ...]
    confrecallg = confrecall[np.newaxis, np.newaxis, np.newaxis, ...]
    confprecisiong = confprecision[np.newaxis, np.newaxis, np.newaxis, ...]

    return {
        'yt': yt,
        'Yt': Yt,
        'yp': yp,
        'Yp': Yp,
        'confmat': confmat,
        'confrecall': confrecall,
        'confprecision': confprecision,
        'ytg': ytg,
        'Ytg': Ytg,
        'ypg': ypg,
        'Ypg': Ypg,
        'confmatg': confmatg,
        'confrecallg': confrecallg,
        'confprecisiong': confprecisiong,
    }

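# A quick worked example of the normalizations used in the fixtures above
# (illustration only, same conventions: rows = true classes, columns =
# predicted classes; `_confmat_norms_sketch` is a hypothetical helper).
def _confmat_norms_sketch():
    c = np.array([[2., 1., 0.],
                  [0., 3., 1.],
                  [0., 0., 3.]])
    recall = c / c.sum(axis=1)[:, np.newaxis]           # rows sum to 1
    precision = (c.T / c.sum(axis=0)[:, np.newaxis]).T  # columns sum to 1
    # diag(recall) == (2/3, 3/4, 1); diag(precision) == (1, 3/4, 3/4)
    return recall, precision
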
def batB_seqlQ_cls3_trues_confmat(request, base_labels_cls3):
    i = request.param
    y_user = np.array([base_labels_cls3[ii] for ii in i])

    nclasses = 3
    Y_user = nu.to_categorical(y_user, nclasses)

    # Batchsize=B, SequenceLength=Q, ClassLabel=1(implicit)
    y_generic = y_user
    Y_generic = nu.to_categorical(y_generic, nclasses)

    return {
        'yt': y_user,
        'Yt': Y_user,
        'nclasses': nclasses,
        'ytg': y_generic,
        'Ytg': Y_generic
    }

def predict_on_inputs_provider(  # pylint: disable=too-many-locals,too-many-statements
        model, inputs_provider, export_to, init, tran):
    def _save(paths, datas):
        with hFile(export_to, 'a') as f:
            for path, data in zip(paths, datas):
                if path not in f.keys():
                    f.create_dataset(
                        path, data=data, compression='lzf', fletcher32=True)

            f.flush()

    currn = None
    ctrue = []
    cpred = []

    tot_conf = None
    tot_conf_vp = None

    for xy, (_, chunking) in inputs_provider.flow(
            indefinitely=False,
            only_labels=False,
            with_chunking=True,
    ):
        ctrue.append(xy[1])
        cpred.append(model.predict_on_batch(xy[0]))

        if currn is None:
            currn = chunking.labelpath
            continue

        if chunking.labelpath != currn:
            t = np.concatenate(ctrue[:-1])
            p = np.concatenate(cpred[:-1])

            if sub != 'keepzero':  # from activity_name above
                z = t[:, 0].astype(bool)
                p[z, 0] = 1.
                p[z, 1:] = 0.

            # raw confusion
            conf = nu.confusion_matrix_forcategorical(
                t, nu.to_categorical(p.argmax(axis=-1), nclasses=t.shape[-1]))

            # viterbi decoded - no scaling
            vp = lu.viterbi_smoothing(p, init, tran)
            conf_vp = nu.confusion_matrix_forcategorical(
                t, nu.to_categorical(vp, nclasses=t.shape[-1]))

            _save(
                paths=["{}/{}".format(_p, currn) for _p in ('raw', 'viterbi')],
                datas=[conf, conf_vp],
            )

            print(currn, end=' ')
            nu.print_prec_rec(*nu.normalize_confusion_matrix(conf), onlydiag=True)

            if tot_conf is None:
                tot_conf = conf
                tot_conf_vp = conf_vp
            else:
                tot_conf += conf
                tot_conf_vp += conf_vp

            currn = chunking.labelpath
            ctrue = ctrue[-1:]
            cpred = cpred[-1:]

    # last chunking
    t = np.concatenate(ctrue)
    p = np.concatenate(cpred)

    if sub != 'keepzero':  # from activity_name above
        z = t[:, 0].astype(bool)
        p[z, 0] = 1.
        p[z, 1:] = 0.

    conf = nu.confusion_matrix_forcategorical(
        t, nu.to_categorical(p.argmax(axis=-1), nclasses=t.shape[-1]))

    vp = lu.viterbi_smoothing(p, init, tran)
    conf_vp = nu.confusion_matrix_forcategorical(
        t, nu.to_categorical(vp, nclasses=t.shape[-1]))

    _save(
        paths=["{}/{}".format(_p, currn) for _p in ('raw', 'viterbi')],
        datas=[conf, conf_vp],
    )

    print(currn, end=' ')
    nu.print_prec_rec(*nu.normalize_confusion_matrix(conf), onlydiag=True)

    tot_conf += conf
    tot_conf_vp += conf_vp

    # print out total-statistics
    _save(
        paths=["{}/{}".format(_p, 'final') for _p in ('raw', 'viterbi')],
        datas=[tot_conf, tot_conf_vp],
    )

    print("\nFINAL - RAW", end=' ')
    nu.print_prec_rec(*nu.normalize_confusion_matrix(tot_conf), onlydiag=False)

    print("\nFINAL - VITERBI", end=' ')
    nu.print_prec_rec(*nu.normalize_confusion_matrix(tot_conf_vp), onlydiag=False)

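# For orientation: with one-hot trues T of shape (N, nclasses) and one-hot
# predictions P of the same shape, the confusion matrix used above is just
# T.T @ P -- entry [i, j] counts frames of true class i predicted as class j.
# A minimal sketch of what `nu.confusion_matrix_forcategorical` is assumed
# to compute (illustration, not the library implementation):
def _confusion_forcategorical_sketch(Yt, Yp):
    return np.dot(Yt.T, Yp)
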
def predict_on_inputs_provider(model, inputs_provider, subsampling,
                               export_to_dir, init, tran, priors):
    export_to = os.path.join(export_to_dir, "confs.h5")

    def _save(paths, datas):
        with hFile(export_to, 'a') as f:
            for path, data in zip(paths, datas):
                if path not in f.keys():
                    f.create_dataset(
                        path, data=data, compression='lzf', fletcher32=True)

            f.flush()

    currn = None
    ctrue = []
    cpred = []

    tot_conf = None
    tot_conf_vp = None
    tot_conf_svp = None

    for xy, (_, chunking) in inputs_provider.flow(
            indefinitely=False,
            only_labels=False,
            with_chunking=True,
    ):
        ctrue.append(xy[1])
        cpred.append(model.predict_on_batch(xy[0]))

        if currn is None:
            currn = chunking.labelpath
            continue

        if chunking.labelpath != currn:
            t = np.concatenate(ctrue[:-1])
            p = np.concatenate(cpred[:-1])

            if subsampling != 'nosub':
                z = t[:, 0].astype(bool)
                p[z, 0] = 1.
                p[z, 1:] = 0.

            # raw confusion
            conf = nu.confusion_matrix_forcategorical(
                t, nu.to_categorical(p.argmax(axis=-1), nclasses=t.shape[-1]))

            # viterbi decoded - no scaling
            vp = viterbi(p, init, tran, priors=None)
            conf_vp = nu.confusion_matrix_forcategorical(
                t, nu.to_categorical(vp, nclasses=t.shape[-1]))

            # viterbi decoded - scaled by priors
            vp = viterbi(p, init, tran, priors=priors)
            conf_svp = nu.confusion_matrix_forcategorical(
                t, nu.to_categorical(vp, nclasses=t.shape[-1]))

            _save(
                paths=[
                    "{}/{}".format(_p, currn)
                    for _p in ('raw', 'viterbi', 'sviterbi')
                ],
                datas=[conf, conf_vp, conf_svp],
            )

            print(currn, end=' ')
            nu.print_prec_rec(*nu.normalize_confusion_matrix(conf), onlydiag=True)

            if tot_conf is None:
                tot_conf = conf
                tot_conf_vp = conf_vp
                tot_conf_svp = conf_svp
            else:
                tot_conf += conf
                tot_conf_vp += conf_vp
                tot_conf_svp += conf_svp

            currn = chunking.labelpath
            ctrue = ctrue[-1:]
            cpred = cpred[-1:]

    # last chunking (mirrors the per-label handling above; without this the
    # final label's chunks would be dropped from the saved and total stats)
    t = np.concatenate(ctrue)
    p = np.concatenate(cpred)

    if subsampling != 'nosub':
        z = t[:, 0].astype(bool)
        p[z, 0] = 1.
        p[z, 1:] = 0.

    conf = nu.confusion_matrix_forcategorical(
        t, nu.to_categorical(p.argmax(axis=-1), nclasses=t.shape[-1]))

    vp = viterbi(p, init, tran, priors=None)
    conf_vp = nu.confusion_matrix_forcategorical(
        t, nu.to_categorical(vp, nclasses=t.shape[-1]))

    vp = viterbi(p, init, tran, priors=priors)
    conf_svp = nu.confusion_matrix_forcategorical(
        t, nu.to_categorical(vp, nclasses=t.shape[-1]))

    _save(
        paths=[
            "{}/{}".format(_p, currn)
            for _p in ('raw', 'viterbi', 'sviterbi')
        ],
        datas=[conf, conf_vp, conf_svp],
    )

    print(currn, end=' ')
    nu.print_prec_rec(*nu.normalize_confusion_matrix(conf), onlydiag=True)

    tot_conf += conf
    tot_conf_vp += conf_vp
    tot_conf_svp += conf_svp

    # print out total-statistics
    _save(
        paths=[
            "{}/{}".format(_p, 'final')
            for _p in ('raw', 'viterbi', 'sviterbi')
        ],
        datas=[tot_conf, tot_conf_vp, tot_conf_svp],
    )

    print("\nFINAL - RAW", end=' ')
    nu.print_prec_rec(*nu.normalize_confusion_matrix(tot_conf), onlydiag=False)

    print("\nFINAL - VITERBI", end=' ')
    nu.print_prec_rec(*nu.normalize_confusion_matrix(tot_conf_vp), onlydiag=False)

    print("\nFINAL - VITERBI - SCALED", end=' ')
    nu.print_prec_rec(*nu.normalize_confusion_matrix(tot_conf_svp), onlydiag=False)

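# A minimal log-space Viterbi decoder, sketching what the `viterbi` /
# `lu.viterbi_smoothing` calls above are assumed to do (a sketch under
# assumptions, not the actual implementation): `posteriors` is (T, S) frame
# posteriors, `init` is (S,) initial state probabilities, `tran` is (S, S)
# transition probabilities, and `priors`, when given, is assumed to scale
# the posteriors to pseudo-likelihoods before decoding.
def _viterbi_sketch(posteriors, init, tran, priors=None):
    eps = np.finfo(float).eps
    obs = posteriors / priors if priors is not None else posteriors
    logobs = np.log(obs + eps)
    logtran = np.log(tran + eps)

    nsteps, nstates = logobs.shape
    delta = np.empty((nsteps, nstates))      # best log-score ending in each state
    psi = np.empty((nsteps, nstates), int)   # argmax back-pointers

    delta[0] = np.log(init + eps) + logobs[0]
    for t in range(1, nsteps):
        scores = delta[t - 1][:, np.newaxis] + logtran  # (from, to)
        psi[t] = scores.argmax(axis=0)
        delta[t] = scores.max(axis=0) + logobs[t]

    # backtrack the best state path
    path = np.empty(nsteps, int)
    path[-1] = delta[-1].argmax()
    for t in range(nsteps - 2, -1, -1):
        path[t] = psi[t + 1, path[t + 1]]

    return path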