# Translate the anchor images with CycleGAN in the given direction, then score the
# generated images against the per-feature SVMs and return the closest matches.
from glob import glob
import os
import subprocess

import numpy as np

def GetQuestionMyOneDirection(anchor_ims, checkpoint_dir, data_bin, data_path,
                              n_features, direction):
    print(anchor_ims)
    dir_ = "/Users/admin/Desktop/UVA/third/CycleGAN-tensorflow/"
    # Clear the staging folder, then copy the anchor images into it.
    for i in glob(os.path.join("result/", "*.jpg")):
        subprocess.call(["rm", i])
    for anchor_im in anchor_ims:
        subprocess.call(["cp", data_path[anchor_im], "result/"])
    # Clear the CycleGAN output folder.
    for i in glob(os.path.join(dir_ + "test1/", "*.jpg")):
        subprocess.call(["rm", i])
    # for i in glob(os.path.join(dir_ + "test/", "*.jpg")):
    #     subprocess.call(["rm", i])
    # Run CycleGAN in test mode on the staged images.
    subprocess.call([
        "python", dir_ + "main.py",
        "--dataset_a", "result/",
        "--dataset_b", "result/",
        "--which_direction", direction,
        "--phase", "test",
        "--checkpoint_dir", checkpoint_dir,
        "--test_dir", dir_ + "test/",
        "--test_dir_a", dir_ + "test1/",
        "--test_dir_b", dir_ + "test/",
    ])
    # Score each generated image: dot its GIST descriptor with every per-feature SVM
    # weight vector, then look up the closest images in the data bins.
    # Gist() and FindTheClosestImage() are project helpers defined elsewhere.
    images = []
    files = glob(os.path.join(dir_ + "test1/", "*.jpg"))
    for f in files:
        gist = Gist(f)
        svm = []
        for i in range(n_features):
            svm.append(np.genfromtxt("../CycleGAN_shoes/Black/" + str(i + 1) + ".txt"))
        res = np.array([np.dot(svm[i], gist) for i in range(n_features)])
        print(res)
        images += FindTheClosestImage(data_bin.keys(), data_bin, res, 0, 1, n_features)
    return images
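# A possible refactor of the scoring loop above, offered as a sketch rather than part of
# the original code: the per-feature SVM weight vectors in ../CycleGAN_shoes/Black/ do not
# depend on the image being scored, so they can be read once instead of once per generated
# image. Assumes the same Gist() helper, n_features, and numpy import as above; the names
# load_feature_svms and score_images are illustrative only.
def load_feature_svms(n_features, svm_dir="../CycleGAN_shoes/Black/"):
    # Stack the weight vectors into one (n_features, gist_dim) matrix.
    return np.array([np.genfromtxt(svm_dir + str(i + 1) + ".txt")
                     for i in range(n_features)])

def score_images(image_paths, n_features):
    svm_weights = load_feature_svms(n_features)
    scores = {}
    for f in image_paths:
        gist = Gist(f)
        # One matrix-vector product replaces the per-feature dot-product loop.
        scores[f] = svm_weights.dot(gist)
    return scores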
# Single-feature ablation: rerun the SVM and RF classifiers with one feature removed at a
# time and write the per-feature results to CSV.
import pandas as pd

def main():
    args = parse_args()
    path = "../data/lgs/" + args.lg + "/" + args.lg
    features, by_speaker, no_normalize, to_normalize = get_features(args.features_csv, args.lg)
    x, y, data = read_norm(args.lg, by_speaker, no_normalize, to_normalize, features)
    svm = []
    rf = []
    for feat in args.to_ablate.split(", "):
        # Remove the feature from x
        x_abl = x.drop([feat], axis=1, inplace=False)
        # Run SVM and RF on the ablated feature set
        SVM_rs_aprf = SVM_rs(x_abl, y, features, args.lg)
        SVM_rs_aprf.insert(0, feat)
        RF_rs_aprf = RF_rs(x_abl, y, features, args.lg)
        RF_rs_aprf.insert(0, feat)
        svm.append(SVM_rs_aprf)
        rf.append(RF_rs_aprf)
    # Columns: ablated feature, accuracy, then per-class precision/recall/F1 (classes B, M, C).
    columns = ['feat', 'acc', 'pB', 'pM', 'pC', 'rB', 'rM', 'rC', 'fB', 'fM', 'fC']
    svm = pd.DataFrame(svm, columns=columns)
    rf = pd.DataFrame(rf, columns=columns)
    svm.to_csv(path + "-abl-SVM.csv")
    rf.to_csv(path + "-abl-RF.csv")
    return
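# A sketch, not part of the original script: recording a no-ablation baseline next to the
# per-feature runs above so the output also shows the accuracy drop caused by each ablated
# feature. Assumes SVM_rs(x, y, features, lg) returns [acc, pB, ..., fC], as implied by the
# column names used when building the DataFrame; ablation_with_deltas is a hypothetical name.
def ablation_with_deltas(x, y, features, lg, to_ablate):
    baseline_acc = SVM_rs(x, y, features, lg)[0]
    rows = []
    for feat in to_ablate:
        res = SVM_rs(x.drop([feat], axis=1), y, features, lg)
        rows.append([feat, res[0], baseline_acc - res[0]])
    return pd.DataFrame(rows, columns=['feat', 'acc', 'acc_drop'])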
# Category-level ablation: drop all features in a category at once, rerun both classifiers,
# and record how many features each category removes.
def main():
    args = parse_args()
    path = "../data/lgs/" + args.lg + "/" + args.lg
    features, by_speaker, no_normalize, to_normalize, cat_dict = get_features(args.features_csv, args.lg)
    x, y, data = read_norm(args.lg, by_speaker, no_normalize, to_normalize, features)
    svm = []
    rf = []
    for cat in cat_dict:
        x_abl = x
        # Loop through the features in each category and remove them
        for feat in cat_dict[cat]:
            x_abl = x_abl.drop([feat], axis=1, inplace=False)
        # Run the classifiers on the ablated feature set
        SVM_rs_aprf = SVM_rs(x_abl, y, features, args.lg)
        SVM_rs_aprf.insert(0, cat)
        SVM_rs_aprf.append(len(cat_dict[cat]))
        RF_rs_aprf = RF_rs(x_abl, y, features, args.lg)
        RF_rs_aprf.insert(0, cat)
        RF_rs_aprf.append(len(cat_dict[cat]))
        svm.append(SVM_rs_aprf)
        rf.append(RF_rs_aprf)
    columns = ['cat', 'acc', 'f1', 'pB', 'pM', 'pC', 'rB', 'rM', 'rC', 'fB', 'fM', 'fC', 'n feat']
    svm = pd.DataFrame(svm, columns=columns)
    rf = pd.DataFrame(rf, columns=columns)
    svm.to_csv(path + "-abl-SVM.csv")
    rf.to_csv(path + "-abl-RF.csv")
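# A minimal sketch of the per-category ablation step above: DataFrame.drop accepts a list
# of column labels, so the inner per-feature loop can be collapsed into one call. Uses the
# same cat_dict mapping (category -> feature names); ablate_category is an illustrative
# name, not a function from the original code.
def ablate_category(x, cat_dict, cat):
    # Drop every feature in the category at once.
    return x.drop(list(cat_dict[cat]), axis=1)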
def pick_clf(labels, features):
    svm = []
    dt = []
    knn = []
    for i in range(100):
        svm.append(test_svm(labels, features))
        dt.append(test_dt(labels, features))
        knn.append(test_knn(labels, features))
    svm = sum([x[0] for x in svm]) / len(svm)
    dt = sum([x[0] for x in dt]) / len(dt)
    knn = sum([x[0] for x in knn]) / len(knn)
    print("average accuracy using svm: ", round(svm, 2))
    print("average accuracy using dt: ", round(dt, 2))
    print("average accuracy using knn: ", round(knn, 2))
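# A more compact variant of pick_clf, offered as a sketch under the same assumptions: each
# test_* helper returns a tuple whose first element is the accuracy, exactly as the loop
# above uses it. The standard deviation is reported alongside the mean; pick_clf_summary
# is an illustrative name.
from statistics import mean, stdev

def pick_clf_summary(labels, features, n_runs=100):
    clfs = {"svm": test_svm, "dt": test_dt, "knn": test_knn}
    for name, run in clfs.items():
        accs = [run(labels, features)[0] for _ in range(n_runs)]
        print("average accuracy using {}: {} (std {})".format(
            name, round(mean(accs), 2), round(stdev(accs), 3)))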
# 10-fold evaluation of six classifiers: collect per-fold statistics, average them,
# plot precision/recall, and save the confusion matrices.
from sklearn.preprocessing import StandardScaler

def main():
    np.set_printoptions(suppress=True)
    data, target = loadCsv('data2.csv')
    # statistics(data)
    dataset_clean, target_clean = distanceMatOut(np.array(data), np.array(target))

    bayes_simple = []
    logreg = []
    bayes_quad = []
    percep = []
    mlp = []
    svm = []

    for i in range(0, 10):
        data_train, data_test, target_train, target_test = kfoldSplit(data, target, 10, i)
        # data_train = zscore(data_train)

        print("Bayesian Classifier")
        stats = bayesianClassifier(data_train, data_test, target_train, target_test)
        bayes_simple.append(stats)

        print("LogisticRegression")
        stats = logisticRegression(data_train, data_test, target_train, target_test)
        logreg.append(stats)

        print("Quadratic Bayesian Classifier")
        stats = quadraticClassifier(data_train, data_test, target_train, target_test)
        bayes_quad.append(stats)

        # data_test = zscore(data_test)
        # Standardize the features for the scale-sensitive classifiers below.
        scaler = StandardScaler()
        scaler.fit(data_train)
        data_train = scaler.transform(data_train)
        data_test = scaler.transform(data_test)

        print("Perceptron")
        stats = perceptron(data_train, data_test, target_train, target_test)
        percep.append(stats)

        print("MLP")
        stats = multiLayerPerceptron(data_train, data_test, target_train, target_test)
        mlp.append(stats)

        print("SVM")
        stats = svmLinearKernel(data_train, data_test, target_train, target_test)
        print(stats)
        svm.append(stats)

    # Average the per-fold statistics, then plot and save the results per classifier.
    print("Bayesian Classifier")
    conf_bayes = np.mean(np.transpose(np.array(bayes_simple)), axis=1)
    base_name = 'naive_bayes'
    plotPREREC(conf_bayes, '../img/' + base_name + '_rec.png')
    saveConfMat(conf_bayes, base_name + '_conf.csv')
    print(conf_bayes)

    print("LogisticRegression")
    conf_logreg = np.mean(np.transpose(np.array(logreg)), axis=1)
    base_name = 'log_reg'
    plotPREREC(conf_logreg, '../img/' + base_name + '_rec.png')
    saveConfMat(conf_logreg, base_name + '_conf.csv')
    print(conf_logreg)

    print("Quadratic Bayesian Classifier")
    conf_quad_bayes = np.mean(np.transpose(np.array(bayes_quad)), axis=1)
    print(conf_quad_bayes)
    base_name = 'quad_bayes'
    plotPREREC(conf_quad_bayes, '../img/' + base_name + '_rec.png')
    saveConfMat(conf_quad_bayes, base_name + '_conf.csv')

    print("Perceptron")
    conf_perc = np.mean(np.transpose(np.array(percep)), axis=1)
    print(conf_perc)
    base_name = 'perc'
    plotPREREC(conf_perc, '../img/' + base_name + '_rec.png')
    saveConfMat(conf_perc, base_name + '_conf.csv')

    print("MLP")
    conf_mlp = np.mean(np.transpose(np.array(mlp)), axis=1)
    print(conf_mlp)
    base_name = 'mlp'
    plotPREREC(conf_mlp, '../img/' + base_name + '_rec.png')
    saveConfMat(conf_mlp, base_name + '_conf.csv')

    print("SVM")
    conf_svm = np.mean(np.transpose(np.array(svm)), axis=1)
    print(conf_svm)
    base_name = 'svm'
    plotPREREC(conf_svm, '../img/' + base_name + '_rec.png')
    saveConfMat(conf_svm, base_name + '_conf.csv')
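# The six "average the folds, plot, save" blocks at the end of main() repeat one pattern;
# this is a sketch of a helper that could replace them, using the same np.mean over folds
# and the same plotPREREC/saveConfMat calls as above. summarize is an illustrative name.
def summarize(fold_stats, base_name, label):
    print(label)
    conf = np.mean(np.transpose(np.array(fold_stats)), axis=1)
    print(conf)
    plotPREREC(conf, '../img/' + base_name + '_rec.png')
    saveConfMat(conf, base_name + '_conf.csv')
    return conf

# e.g. summarize(bayes_simple, 'naive_bayes', "Bayesian Classifier")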
# Exported notebook cells; the fragment below appears to begin with the tail of the
# svm_30_features() helper used in the loop that follows.

    # print("SVM:Train set score predict: {:.6f}".format(accuracy_svc_train))
    print("SVM:Test set score: {:.6f}".format(linearSVC.score(X_test, y_test)))
    # preds_test_svc = linearSVC.predict(X_test)
    # accuracy_svc_test = accuracy_score(y_test, preds_test_svc)
    # print("SVM:Test set score predict: {:.6f}".format(accuracy_svc_test))
    return linearSVC.score(X_test, y_test)


# In[14]:

# Note: naming this list `svm` shadows sklearn's `svm` module (if it was imported as
# `from sklearn import svm`), so the commented-out `svm.SVC(C=10)` call in the next cell
# would not work while this list is in scope.
svm = []
for i in range(1, iterations):
    accuracy_svc = svm_30_features()
    # print("accuracy_svm=%f " % accuracy_svm)
    svm.append(accuracy_svc)
print(svm)


# ## SVM regression on PCA reduced set

# In[15]:

# svc = svm.SVC(C=10)
# svc.fit(train, train_labels)
# preds_svc_reduced = svc.predict(test)
# accuracy_svc_reduced = accuracy_score(test_labels, preds_svc_reduced)
# print(train.shape)
# print("accuracy_svm_reduced=%f " % accuracy_svc_reduced)

def svm_30_features_reduced():