def predict(name, sp, arrsource, arrfeats, nsp, clf_scaler_feats=None,
            clf_factory=None, clffact_feats=None, clf_type='svm', norm=True,
            nfeats=100, balance_train=False):
    """Train a classifier on arrfeats (or reuse a supplied one) and score arrsource.

    - arrfeats: labeled training examples array, from ppi.feature_array.arrfeats,
      also stored in res.cvtest result as result.exs.arrfeats.
    - arrsource: array of data to classify, matching the training array
    - clf_scaler_feats: optional (clf, scaler, feats) triple; when given,
      training is skipped and the triple is reused as-is.
    Returns a Struct bundling the scored ppis with the training configuration.
    """
    if clf_scaler_feats:
        # Reuse an already-trained classifier, scaler, and feature selection.
        clf, scaler, feats = clf_scaler_feats
    else:
        if balance_train:
            arrfeats = fe.balance_train(arrfeats)
        # Fall back to the registered factory pair for this classifier type.
        if clf_type in clf_factories and clf_factory is None:
            clf_factory, clffact_feats = clf_factories[clf_type]
        feats = feature_selection(arrfeats, nfeats,
                                  clffact_feats() if clffact_feats else None)
        arrfeats = fe.keep_cols(arrfeats, feats)
        clf = clf_factory()
        scaler = ml.fit_clf(arrfeats, clf, norm=norm)
    # fixed: Python 2 print statement -> print() function (consistent with
    # the other functions in this file).
    print("Classifier:", clf)
    # Restrict the data to the selected feature columns before scoring.
    arrsource = fe.keep_cols(arrsource, feats)
    ppis = ml.classify(clf, arrsource, scaler=scaler)
    pres = Struct(ppis=ppis, name=name, species=sp, ppi_params=str(clf),
                  feats=feats, nsp=nsp, arrfeats=arrfeats,
                  balance_train=balance_train)
    return pres
def classify(agent_id, file):
    """Look up the agent registered for agent_id and classify the given file.

    Returns None when no agent exists for agent_id; otherwise returns
    ml.classify's result.
    NOTE: parameter name `file` shadows the builtin; kept for interface
    compatibility with existing callers.
    """
    agent = get_agent(agent_id)
    # fixed: `agent == None` -> identity check; removed leftover debug print.
    if agent is None:
        return None
    return ml.classify(agent, file)
def test(img, landmark=None, is_heatmap=False, binary_output=False, model=None):
    """Classify img, optionally converting it to a heatmap first.

    When is_heatmap is True the input is replaced by a heat map built from
    landmark; when binary_output is True every classifier output is
    thresholded at 0.5 via binary().
    """
    if is_heatmap:
        net_input = heat_map_compute(img, landmark,
                                     landmark_is_01=False,
                                     img_color=True,
                                     radius=occlu_param['radius'])
    else:
        net_input = img
    outputs = classify(model, net_input)
    if binary_output:
        return [binary(out, threshold=0.5) for out in outputs]
    return outputs
def test(img, mean_shape=None, normalizer=None, model=None):
    """Compute the model's prediction for the input img.

    Applies normalizer.transform first when a normalizer is given. When
    mean_shape is supplied, the raw output is reshaped to
    (landmark_num, 2) offsets and added to mean_shape.
    """
    net_input = normalizer.transform(img) if normalizer is not None else img
    prediction = classify(model, net_input)
    if mean_shape is None:
        return prediction
    offsets = np.reshape(prediction, (data_param['landmark_num'], 2))
    return offsets + mean_shape
def fold_test(arrfeats, kfold, k, clf_factory, clffact_feats, nfeats, norm,
              balance_train):
    """Train and evaluate fold k of a kfold cross-validation split.

    Returns (ppis, clf, scaler, feats); ppis is empty when the training fold
    lacks both positive and negative examples.
    """
    arrtrain, arrtest = fe.arr_kfold(arrfeats, kfold, k)
    if balance_train:
        arrtrain = fe.balance_train(arrtrain)
    if nfeats:
        clf_feats = clffact_feats()
        feats = feature_selection(arrtrain, nfeats, clf_feats)
        # fixed: `for a in arrtrain,arrtest` (unparenthesized tuple inside a
        # comprehension) is a SyntaxError in Python 3 — tuple must be
        # parenthesized.
        arrtrain, arrtest = [fe.keep_cols(a, feats) for a in (arrtrain, arrtest)]
    else:
        feats = None
    clf = clf_factory()
    if k == 0:
        # Only announce the classifier once, on the first fold.
        # fixed: Python 2 print statement -> print() function.
        print("Classifier:", clf)
    scaler = ml.fit_clf(arrtrain, clf, norm=norm)
    if ml.exist_pos_neg(arrtrain):
        ppis = ml.classify(clf, arrtest, scaler=scaler, do_sort=False)
    else:
        # Training fold is degenerate (single class) — nothing to score.
        ppis = []
    return ppis, clf, scaler, feats
def val_compute(imgs, labels, mean_shape=None, normalizer=None, model=None,
                loss_compute=None):
    """Compute and log the mean loss of model predictions over imgs vs labels.

    Logs progress every 100 images when data_param['print_debug'] is set,
    then logs the final average loss.
    """
    loss = 0.0
    count = 0
    for img, label in zip(imgs, labels):
        # fixed: loss was accumulated twice per image — once via
        # FaceAlignment.test and again via a duplicated inline
        # normalize+classify block — while count advanced only once,
        # inflating the average. Keep the single FaceAlignment.test path,
        # which also applies the mean_shape reshaping.
        prediction = FaceAlignment.test(img, mean_shape=mean_shape,
                                        normalizer=normalizer, model=model)
        loss += loss_compute(prediction, label)
        count += 1
        if data_param['print_debug'] and count % 100 == 0:
            logger("predicted {} imgs".format(count))
    # Guard the division so an empty input does not raise ZeroDivisionError.
    avg_loss = loss / count if count else 0.0
    logger("test loss is {}".format(avg_loss))