def PPMIFT(cluster_names_fn, ranking_fn, file_name, do_p=False, data_type="movies",
           rewrite_files=False, limit_entities=False, classification="genres",
           lowest_amt=0, highest_amt=2147000000):
    ppmi_ft_fn = "../data/" + data_type + "/finetune/" + file_name + ".txt"
    all_fns = [ppmi_ft_fn]
    if dt.allFnsAlreadyExist(all_fns) and not rewrite_files:
        print("Skipping task", PPMIFT.__name__)
        return
    else:
        print("Running task", PPMIFT.__name__)

    if limit_entities is False:
        classification = "all"

    ranking = dt.import2dArray(ranking_fn)
    names = dt.import1dArray(cluster_names_fn)

    frq = []
    for name in names:
        # Use only the first term of the cluster name, without a trailing colon
        name = name.split()[0]
        if ":" in name:
            name = name[:-1]
        frq.append(readPPMI(name, data_type, lowest_amt, highest_amt, classification))

    dt.write2dArray(frq, ppmi_ft_fn)
    return frq
def LDA(tf, names, components, file_name, doc_topic_prior, topic_word_prior, data_type,
        rewrite_files):
    # The model name is not part of the existence check: it was unused, and a batch of
    # files was renamed manually without renaming the model files.
    rep_name = "../data/" + data_type + "/LDA/rep/" + file_name + ".txt"
    model_name = "../data/" + data_type + "/LDA/model/" + file_name + ".txt"
    names_name = "../data/" + data_type + "/LDA/names/" + file_name + ".txt"
    all_names = [rep_name, names_name]
    if dt.allFnsAlreadyExist(all_names) and not rewrite_files:
        print("Already completed")
        return

    print(len(tf), len(tf[0]))
    print("Fitting LDA model with tf features")
    lda = LatentDirichletAllocation(doc_topic_prior=doc_topic_prior,
                                    topic_word_prior=topic_word_prior,
                                    n_topics=components)
    t0 = time()
    tf = np.asarray(tf).transpose()
    new_rep = lda.fit_transform(tf)
    print("done in %0.3fs." % (time() - t0))

    print("\nTopics in LDA model:")
    topics = print_top_words(lda, names)
    topics.reverse()

    dt.write1dArray(topics, names_name)
    dt.write2dArray(new_rep.transpose(), rep_name)
    joblib.dump(lda, model_name)
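# print_top_words is not defined in this excerpt; the sketch below is a minimal guess at
# what it is assumed to do (return one string of the highest-weighted terms per topic).
# The helper name and the n_top_words parameter are illustrative only; the real helper
# may format its output differently.
def _print_top_words_sketch(lda_model, feature_names, n_top_words=10):
    topics = []
    for topic in lda_model.components_:
        # Indices of the highest-weighted terms for this topic
        top_ids = topic.argsort()[::-1][:n_top_words]
        words = " ".join(feature_names[i] for i in top_ids)
        print(words)
        topics.append(words)
    return topics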
def main(data_type, clf, min, max, depth, rewrite_files):
    dm_fn = "../data/" + data_type + "/mds/class-all-" + str(min) + "-" + str(max) \
            + "-" + clf + "dm"
    dm_shorten_fn = "../data/" + data_type + "/mds/class-all-" + str(min) + "-" + str(max) \
                    + "-" + clf + "dmround"
    mds_fn = "../data/" + data_type + "/mds/class-all-" + str(min) + "-" + str(max) \
             + "-" + clf + "d" + str(depth)
    svd_fn = "../data/" + data_type + "/svd/class-all-" + str(min) + "-" + str(max) \
             + "-" + clf + "d" + str(depth)
    pca_fn = "../data/" + data_type + "/pca/class-all-" + str(min) + "-" + str(max) \
             + "-" + clf + "d" + str(depth)
    shorten_fn = "../data/" + data_type + "/bow/ppmi/class-all-" + str(min) + "-" + str(max) \
                 + "-" + clf + "round"
    term_frequency_fn = init_vector_path = "../data/" + data_type \
                        + "/bow/ppmi/simple_numeric_stopwords_ppmi 2-all.npz"

    if dt.allFnsAlreadyExist([dm_fn, mds_fn, svd_fn, shorten_fn]):
        print("all files exist")
        exit()

    # Get MDS
    """
    tf = dt.import2dArray(term_frequency_fn).transpose()
    pca = sparseSVD(tf, depth)
    dt.write2dArray(pca, pca_fn)
    """
    # REMINDER: np.dot is WAY faster!
    tf = dt.import2dArray(term_frequency_fn, return_sparse=True)
    dm = getDsimMatrixDense(tf)
    dt.write2dArray(dm, dm_fn)
    print("wrote dm")
    """ Pretty sure none of this works
def logisticRegression(cluster_names_fn, ranking_fn, file_name, do_p=False,
                       data_type="movies", rewrite_files=False, limit_entities=False,
                       classification="genres", lowest_amt=0, highest_amt=2147000000,
                       sparse_freqs_fn=None, bow_names_fn=None):
    lr_fn = "../data/" + data_type + "/finetune/boc/" + file_name + ".txt"
    all_fns = [lr_fn]
    if dt.allFnsAlreadyExist(all_fns) and not rewrite_files:
        print("Skipping task", logisticRegression.__name__)
        return
    else:
        print("Running task", logisticRegression.__name__)

    if limit_entities is False:
        classification = "all"

    cluster_names = dt.import2dArray(cluster_names_fn, "s")
    bow_names = dt.import1dArray(bow_names_fn, "s")
    sparse_freqs = dt.import2dArray(sparse_freqs_fn, return_sparse=True)

    frq = getLROnBag(cluster_names, data_type, lowest_amt, highest_amt, classification,
                     file_name, bow_names, sparse_freqs)

    dt.write2dArray(frq, lr_fn)
    return frq
def bagOfClusters(cluster_names_fn, ranking_fn, file_name, do_p=False, data_type="movies",
                  rewrite_files=False, limit_entities=False, classification="genres",
                  lowest_amt=0, highest_amt=2147000000):
    boc_fn = "../data/" + data_type + "/finetune/boc/" + file_name + ".txt"
    all_fns = [boc_fn]
    if dt.allFnsAlreadyExist(all_fns) and not rewrite_files:
        print("Skipping task", bagOfClusters.__name__)
        return
    else:
        print("Running task", bagOfClusters.__name__)

    if limit_entities is False:
        classification = "all"

    ranking = dt.import2dArray(ranking_fn)
    names = dt.import2dArray(cluster_names_fn, "s")

    frq = writeBagOfClusters(names, data_type, lowest_amt, highest_amt, classification)

    dt.write2dArray(frq, boc_fn)
    return frq
def pavPPMI(cluster_names_fn, ranking_fn, file_name, do_p=False, data_type="movies",
            rewrite_files=False, limit_entities=False, classification="genres",
            lowest_amt=0, highest_amt=2147000000):
    pavPPMI_fn = "../data/" + data_type + "/finetune/" + file_name + ".txt"
    all_fns = [pavPPMI_fn]
    if dt.allFnsAlreadyExist(all_fns) and not rewrite_files:
        print("Skipping task", pavPPMI.__name__)
        return
    else:
        print("Running task", pavPPMI.__name__)
        print("certainly still running that old pavPPMI task, yes sir")

    if limit_entities is False:
        classification = "all"

    ranking = dt.import2dArray(ranking_fn)
    names = dt.import1dArray(cluster_names_fn)

    frq = []
    for name in names:
        # Use only the first term of the cluster name, without a trailing colon
        name = name.split()[0]
        if ":" in name:
            name = name[:-1]
        frq.append(readPPMI(name, data_type, lowest_amt, highest_amt, classification))

    pav_classes = []
    for f in range(len(frq)):
        try:
            print(names[f])
            x = np.asarray(frq[f])
            y = ranking[f]
            # Isotonic regression (pool adjacent violators) of the ranking against the PPMI values
            ir = IsotonicRegression()
            y_ = ir.fit_transform(x, y)
            pav_classes.append(y_)
            if do_p:
                plot(x, y, y_)
        except ValueError:
            print(names[f], "len ppmi", len(frq[f]), "len ranking", len(ranking[f]))
            exit()
        print(f)

    dt.write2dArray(pav_classes, pavPPMI_fn)
    return pav_classes
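# A small, self-contained illustration (made-up numbers, not part of the pipeline) of the
# isotonic regression step above: fit_transform returns the best non-decreasing fit of the
# ranking scores as a function of the PPMI values, computed by the pool-adjacent-violators
# algorithm. The demo name and data are hypothetical.
def _isotonic_regression_demo():
    import numpy as np
    from sklearn.isotonic import IsotonicRegression
    x = np.array([0.1, 0.5, 0.3, 0.9, 0.7])   # hypothetical PPMI values
    y = np.array([1.0, 2.0, 1.5, 4.0, 3.0])   # hypothetical ranking scores
    y_ = IsotonicRegression().fit_transform(x, y)
    print(y_)  # fitted values, monotone non-decreasing in x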
def saveClusters(directions_fn, scores_fn, names_fn, filename, amt_of_dirs, data_type,
                 cluster_amt, rewrite_files=False, algorithm="meanshift_k"):
    dict_fn = "../data/" + data_type + "/cluster/dict/" + filename + ".txt"
    cluster_directions_fn = "../data/" + data_type + "/cluster/clusters/" + filename + ".txt"
    all_fns = [dict_fn]
    if dt.allFnsAlreadyExist(all_fns) and not rewrite_files:
        print("Skipping task", saveClusters.__name__)
        return
    else:
        print("Running task", saveClusters.__name__)

    p_dir = dt.import2dArray(directions_fn)
    p_names = dt.import1dArray(names_fn, "s")
    p_scores = dt.import1dArray(scores_fn, "f")

    # Keep only the top-scoring directions
    ids = np.argsort(p_scores)
    p_dir = np.flipud(p_dir[ids])[:amt_of_dirs]
    p_names = np.flipud(p_names[ids])[:amt_of_dirs]

    if algorithm == "meanshift":
        labels = meanShift(p_dir)
    else:
        labels = kMeans(p_dir, cluster_amt)

    unique, counts = np.unique(labels, return_counts=True)

    # Group the direction names and the directions themselves by cluster label
    clusters = []
    dir_clusters = []
    for i in range(len(unique)):
        clusters.append([])
        dir_clusters.append([])
    for i in range(len(labels)):
        clusters[labels[i]].append(p_names[i])
        dir_clusters[labels[i]].append(p_dir[i])

    # Represent each cluster by the mean of its member directions
    cluster_directions = []
    for l in range(len(dir_clusters)):
        cluster_directions.append(dt.mean_of_array(dir_clusters[l]))

    print("------------------------")
    for c in clusters:
        print(c)
    print("------------------------")

    dt.write2dArray(clusters, dict_fn)
    dt.write2dArray(cluster_directions, cluster_directions_fn)
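# The meanShift/kMeans helpers used above are not shown in this excerpt. The sketches
# below are minimal guesses at what they are assumed to do, using scikit-learn; the real
# helpers (and their hyper-parameters) may differ.
def _kMeans_sketch(directions, cluster_amt):
    from sklearn.cluster import KMeans
    # One integer cluster label per direction
    return KMeans(n_clusters=cluster_amt).fit_predict(directions)

def _meanShift_sketch(directions):
    from sklearn.cluster import MeanShift
    # Mean shift chooses the number of clusters itself
    return MeanShift().fit_predict(directions)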
def getNDCG(rankings_fn, fn, data_type, bow_fn, ppmi_fn, lowest_count, rewrite_files=False,
            highest_count=0, classification=""):
    # Check if the NDCG scores have already been calculated; if they have, skip.
    ndcg_fn = "../data/" + data_type + "/ndcg/" + fn + ".txt"
    all_fns = [ndcg_fn]
    if dt.allFnsAlreadyExist(all_fns) and not rewrite_files:
        print("Skipping task", getNDCG.__name__)
        return
    else:
        print("Running task", getNDCG.__name__)

    # Get the PPMI values for every word and the list of words ("names")
    names = dt.import1dArray("../data/" + data_type + "/bow/names/" + bow_fn)
    ppmi = dt.import2dArray("../data/" + data_type + "/bow/ppmi/" + ppmi_fn)

    # Process the rankings line-by-line so as to not run out of memory
    ndcg_a = []
    #spearman_a = []
    with open(rankings_fn) as rankings:
        r = 0
        for lr in rankings:
            # Pair ranking line r with the corresponding PPMI row
            lp = ppmi[r]
            # Get the entity order induced by the ranking values, e.g. "1, 4, 3, 50"
            sorted_indices = np.argsort(list(map(float, lr.strip().split())))[::-1]
            # Get the NDCG score of the PPMI values (used as relevance) against that order
            ndcg = ndcg_from_ranking(lp, sorted_indices)
            # Add to array and print
            ndcg_a.append(ndcg)
            print("ndcg", ndcg, names[r], r)
            """
            smr = spearmanr(ppmi_indices, sorted_indices)[1]
            spearman_a.append(smr)
            print("spearman", smr, names[r], r)
            """
            r += 1

    # Save NDCG
    dt.write1dArray(ndcg_a, ndcg_fn)
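# ndcg_from_ranking is not shown in this excerpt. The sketch below is a minimal guess at
# the standard NDCG computation it is assumed to perform: DCG of the relevance values in
# the given order, normalised by the ideal DCG. It assumes ranked_indices covers every
# entity; the real implementation may differ.
def _ndcg_from_ranking_sketch(relevance, ranked_indices):
    import numpy as np
    relevance = np.asarray(relevance, dtype=float)
    # Gains in the order given by the ranking
    gains = relevance[ranked_indices]
    discounts = np.log2(np.arange(2, len(gains) + 2))
    dcg = np.sum(gains / discounts)
    # Ideal ordering: relevance sorted from highest to lowest
    ideal = np.sort(relevance)[::-1]
    idcg = np.sum(ideal / discounts)
    return dcg / idcg if idcg > 0 else 0.0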
def avgPPMI(cluster_names_fn, ranking_fn, file_name, do_p=False, data_type="movies",
            rewrite_files=False, classification="genres", lowest_amt=0,
            highest_amt=2147000000, limit_entities=False, save_results_so_far=False):
    avgPPMI_fn = "../data/" + data_type + "/finetune/" + file_name + ".txt"
    all_fns = [avgPPMI_fn]
    if (dt.allFnsAlreadyExist(all_fns) and not rewrite_files) or save_results_so_far:
        print("Skipping task", avgPPMI.__name__)
        return
    else:
        print("Running task", avgPPMI.__name__)

    if limit_entities is False:
        classification = "all"

    ranking = dt.import2dArray(ranking_fn)
    names = dt.import2dArray(cluster_names_fn, "s")

    # Strip trailing colons from the cluster term names
    for n in range(len(names)):
        for x in range(len(names[n])):
            if ":" in names[n][x]:
                names[n][x] = names[n][x][:-1]

    frq = []
    for n in range(len(names)):
        # Collect the PPMI vector of every term in the cluster
        name_frq = []
        for name in names[n]:
            name_frq.append(readPPMI(name, data_type, lowest_amt, highest_amt, classification))
        # Average the term PPMI values per entity
        avg_frq = []
        name_frq = np.asarray(name_frq).transpose()
        for name in name_frq:
            avg_frq.append(np.average(name))
        frq.append(np.asarray(avg_frq))
        print(n)

    dt.write2dArray(frq, avgPPMI_fn)
    return frq
def bagOfClustersPavPPMI(cluster_names_fn, ranking_fn, file_name, do_p=False,
                         data_type="movies", rewrite_files=False, limit_entities=False,
                         classification="genres", lowest_amt=0, highest_amt=2147000000,
                         sparse_freqs_fn=None, bow_names_fn=None):
    pavPPMI_fn = "../data/" + data_type + "/finetune/boc/" + file_name + ".txt"
    all_fns = [pavPPMI_fn]
    if dt.allFnsAlreadyExist(all_fns) and not rewrite_files:
        print("Skipping task", bagOfClustersPavPPMI.__name__)
        return
    else:
        print("Running task", bagOfClustersPavPPMI.__name__)

    if limit_entities is False:
        classification = "all"

    bow_names = dt.import1dArray(bow_names_fn, "s")
    sparse_freqs = dt.import2dArray(sparse_freqs_fn, return_sparse=True)
    ranking = dt.import2dArray(ranking_fn)
    cluster_names = dt.import2dArray(cluster_names_fn, "s")

    frq = getLROnBag(cluster_names, data_type, lowest_amt, highest_amt, classification,
                     file_name, bow_names, sparse_freqs)

    pav_classes = []
    for f in range(len(frq)):
        print(cluster_names[f])
        x = np.asarray(frq[f])
        y = ranking[f]
        ir = IsotonicRegression()
        y_ = ir.fit_transform(x, y)
        pav_classes.append(y_)
        if do_p:
            plot(x, y, y_)
        print(f)

    dt.write2dArray(pav_classes, pavPPMI_fn)
    return pav_classes
def main(data_type, clf, highest_amt, lowest_amt, depth, rewrite_files):
    min = lowest_amt
    max = highest_amt
    dm_fn = "../data/" + data_type + "/mds/class-all-" + str(min) + "-" + str(max) \
            + "-" + clf + "dm"
    dm_shorten_fn = "../data/" + data_type + "/mds/class-all-" + str(min) + "-" + str(max) \
                    + "-" + clf + "dmround"
    mds_fn = "../data/" + data_type + "/mds/class-all-" + str(min) + "-" + str(max) \
             + "-" + clf + "d" + str(depth)
    svd_fn = "../data/" + data_type + "/svd/class-all-" + str(min) + "-" + str(max) \
             + "-" + clf + "d" + str(depth)
    pca_fn = "../data/" + data_type + "/pca/class-all-" + str(min) + "-" + str(max) \
             + "-" + clf + "d" + str(depth)
    shorten_fn = "../data/" + data_type + "/bow/ppmi/class-all-" + str(min) + "-" + str(max) \
                 + "-" + clf + "round"
    term_frequency_fn = init_vector_path = "../data/" + data_type + "/bow/ppmi/class-all-" \
                        + str(min) + "-" + str(max) + "-" + clf

    if dt.allFnsAlreadyExist([dm_fn, mds_fn, svd_fn, shorten_fn]):
        print("all files exist")
        exit()

    if dt.fileExists(dm_fn) is False:
        newsgroups_train = fetch_20newsgroups(subset='train', shuffle=False)
        newsgroups_test = fetch_20newsgroups(subset='test', shuffle=False)
        vectors = np.concatenate((newsgroups_train.data, newsgroups_test.data), axis=0)
        newsgroups_test = None
        newsgroups_train = None

        # Get sparse tf rep
        tf_vectorizer = CountVectorizer(max_df=highest_amt, min_df=lowest_amt,
                                        stop_words='english')
        print("completed vectorizer")
        tf = tf_vectorizer.fit_transform(vectors)
        vectors = None

        # Get sparse PPMI rep from sparse tf rep
        sparse_ppmi = convertPPMISparse(tf)
        print("done ppmi sparse")

        # Get sparse dissimilarity matrix from sparse PPMI rep
        dm = getDissimilarityMatrixSparse(sparse_ppmi)
        dt.write2dArray(dm, dm_fn)
    else:
        dm = dt.import2dArray(dm_fn)

    print("starting mds")
    # Use the dissimilarity matrix as input to MDS
    mds = createMDS(dm, depth)

    # Save MDS
    dt.write2dArray(mds, mds_fn)
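# convertPPMISparse is not shown in this excerpt. The dense sketch below is a minimal
# guess at the PPMI weighting it is assumed to apply to the raw term-frequency counts:
# PPMI(i, j) = max(0, log2(p(i, j) / (p(i) * p(j)))). The helper name is hypothetical and
# the real sparse implementation may differ.
def _ppmi_sketch(tf):
    import numpy as np
    tf = np.asarray(tf, dtype=float)
    total = tf.sum()
    p_ij = tf / total                                 # joint probabilities
    p_i = tf.sum(axis=1, keepdims=True) / total       # row marginals
    p_j = tf.sum(axis=0, keepdims=True) / total       # column marginals
    with np.errstate(divide="ignore", invalid="ignore"):
        pmi = np.log2(p_ij / (p_i * p_j))
    pmi[~np.isfinite(pmi)] = 0.0                      # zero counts give PMI of 0
    return np.maximum(pmi, 0.0)                       # keep only positive PMI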
def getAllRankings(directions_fn, vectors_fn, cluster_names_fn, vector_names_fn, percent,
                   percentage_increment, by_vector, fn, discrete=True, data_type="movies",
                   rewrite_files=False):
    labels_fn = "../data/" + data_type + "/rank/labels/" + fn + ".txt"
    rankings_fn = "../data/" + data_type + "/rank/numeric/" + fn + ".txt"
    discrete_labels_fn = "../data/" + data_type + "/rank/discrete/" + fn + ".txt"

    all_fns = [rankings_fn]
    if dt.allFnsAlreadyExist(all_fns) and not rewrite_files:
        for f in all_fns:
            print(f, "Already exists")
        print("Skipping task", "getAllRankings")
        return
    else:
        print("Running task", "getAllRankings")

    directions = dt.import2dArray(directions_fn)
    vectors = dt.import2dArray(vectors_fn)
    cluster_names = dt.import1dArray(cluster_names_fn)
    vector_names = dt.import1dArray(vector_names_fn)

    rankings = getRankings(directions, vectors, cluster_names, vector_names)
    rankings = np.asarray(rankings)

    if discrete:
        labels = createLabels(rankings, percent)
        labels = np.asarray(labels)
        discrete_labels = createDiscreteLabels(rankings, percentage_increment)
        discrete_labels = np.asarray(discrete_labels)

    if by_vector:
        if discrete:
            labels = labels.transpose()
            discrete_labels = discrete_labels.transpose()
        rankings = rankings.transpose()

    if discrete:
        dt.write2dArray(labels, labels_fn)
    dt.write2dArray(rankings, rankings_fn)
    if discrete:
        dt.write2dArray(discrete_labels, discrete_labels_fn)
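# getRankings is not shown in this excerpt. For linear directions, ranking every entity on
# every direction is assumed to amount to a dot product between the direction vectors and
# the entity vectors; the sketch below illustrates that assumption (the real helper also
# takes the name arguments, which are ignored here).
def _getRankings_sketch(directions, vectors):
    import numpy as np
    # One row of scores per direction, one column per entity
    return np.dot(np.asarray(directions), np.asarray(vectors).T)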
def getAllPhraseRankings(directions_fn=None, vectors_fn=None, property_names_fn=None,
                         vector_names_fn=None, fn="no filename", percentage_increment=1,
                         scores_fn=None, top_amt=0, discrete=False, data_type="movies",
                         rewrite_files=False):
    rankings_fn_all = "../data/" + data_type + "/rank/numeric/" + fn + "ALL.txt"

    all_fns = [rankings_fn_all]
    if dt.allFnsAlreadyExist(all_fns) and not rewrite_files:
        print("Skipping task", "getAllPhraseRankings")
        return
    else:
        print("Running task", "getAllPhraseRankings")

    directions = dt.import2dArray(directions_fn)
    vectors = dt.import2dArray(vectors_fn)
    property_names = dt.import1dArray(property_names_fn)
    vector_names = dt.import1dArray(vector_names_fn)

    if top_amt != 0:
        scores = dt.import1dArray(scores_fn, "f")
        directions = dt.sortByReverseArray(directions, scores)[:top_amt]
        property_names = dt.sortByReverseArray(property_names, scores)[:top_amt]

    rankings = getRankings(directions, vectors, property_names, vector_names)

    if discrete:
        discrete_labels = createDiscreteLabels(rankings, percentage_increment)
        discrete_labels = np.asarray(discrete_labels)

    for a in range(len(rankings)):
        rankings[a] = np.around(rankings[a], decimals=4)

    #dt.write1dArray(property_names, "../data/movies/bow/names/top5kof17k.txt")
    dt.write2dArray(rankings, rankings_fn_all)
def __init__(self, vector_path, class_path, property_names_fn, file_name, svm_type,
             training_size=10000, lowest_count=200, highest_count=21470000, get_kappa=True,
             get_f1=True, single_class=True, data_type="movies", getting_directions=True,
             threads=1, chunk_amt=0, chunk_id=0, rewrite_files=False, classification="all",
             loc="../data/"):
    self.get_kappa = True
    self.get_f1 = get_f1
    self.data_type = data_type
    self.classification = classification
    self.lowest_amt = lowest_count
    self.higher_amt = highest_count

    if chunk_amt > 0:
        file_name = file_name + " CID" + str(chunk_id) + " CAMT" + str(chunk_amt)

    directions_fn = loc + data_type + "/svm/directions/" + file_name + ".txt"
    ktau_scores_fn = loc + data_type + "/svm/f1/" + file_name + ".txt"
    kappa_fn = loc + data_type + "/svm/kappa/" + file_name + ".txt"
    acc_fn = loc + data_type + "/svm/acc/" + file_name + ".txt"

    all_fns = [directions_fn, kappa_fn]
    if dt.allFnsAlreadyExist(all_fns) and not rewrite_files:
        print("Skipping task", "getSVMResults")
        return
    else:
        print("Running task", "getSVMResults")

    y_train = 0
    y_test = 0
    vectors = np.asarray(dt.import2dArray(vector_path))
    print("imported vectors")
    if not getting_directions:
        classes = np.asarray(dt.import2dArray(class_path))
        print("imported classes")
    property_names = dt.import1dArray(property_names_fn)
    print("imported property names")

    if chunk_amt > 0:
        # Only process this chunk's share of the property names
        if chunk_id == chunk_amt - 1:
            chunk = int(len(property_names) / chunk_amt)
            multiply = chunk_amt - 1
            property_names = property_names[chunk * multiply:]
        else:
            property_names = dt.chunks(property_names,
                                       int((len(property_names) / chunk_amt)))[chunk_id]

    if not getting_directions:
        x_train, x_test, y_train, y_test = train_test_split(vectors, classes, test_size=0.3,
                                                            random_state=0)
    else:
        x_train = vectors
        x_test = vectors

    if get_f1:
        y_train = y_train.transpose()
        y_test = y_test.transpose()
        print("transposed")

    self.x_train = x_train
    self.x_test = x_test
    self.y_train = y_train
    self.y_test = y_test

    if self.get_f1 is False:
        print("running svms")
        kappa_scores, directions, ktau_scores, property_names = self.runAllSVMs(
            y_test, y_train, property_names, file_name, svm_type, getting_directions, threads)

        dt.write1dArray(kappa_scores, kappa_fn)
        dt.write2dArray(directions, directions_fn)
        dt.write1dArray(ktau_scores, ktau_scores_fn)
        dt.write1dArray(property_names, property_names_fn + file_name + ".txt")
    else:
        final_f1 = []
        final_acc = []
        for y in range(len(y_train)):
            f1, acc = self.runClassifySVM(y_test[y], y_train[y])
            print(f1, acc)
            final_f1.append(f1)
            final_acc.append(acc)
        dt.write1dArray(final_f1, ktau_scores_fn)
        dt.write1dArray(final_acc, acc_fn)
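# runAllSVMs is not shown in this excerpt. The per-term "direction" it returns is assumed
# to be the weight vector of a linear SVM trained to separate the entities associated with
# a term from the rest. The sketch below illustrates that assumption for a single term;
# the helper name and the C value are hypothetical.
def _svm_direction_sketch(vectors, binary_term_labels):
    from sklearn import svm
    clf = svm.LinearSVC(C=1.0)
    clf.fit(vectors, binary_term_labels)
    # The learned hyperplane normal is used as the term's direction in the space
    return clf.coef_[0]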
def __init__(self, features_fn, classes_fn, class_names_fn, cluster_names_fn, filename,
             max_depth=None, balance=None, criterion="entropy", save_details=False,
             data_type="movies", cv_splits=5, csv_fn="../data/temp/no_csv_provided.csv",
             rewrite_files=True, split_to_use=-1, development=False, limit_entities=False,
             limited_label_fn=None, vector_names_fn=None, pruning=1,
             save_results_so_far=False):
    vectors = np.asarray(dt.import2dArray(features_fn)).transpose()
    labels = np.asarray(dt.import2dArray(classes_fn, "i"))
    print("vectors", len(vectors), len(vectors[0]))
    print("labels", len(labels), len(labels[0]))
    cluster_names = dt.import1dArray(cluster_names_fn)
    label_names = dt.import1dArray(class_names_fn)

    all_fns = []
    file_names = ['ACC J48' + filename, 'F1 J48' + filename]
    acc_fn = '../data/' + data_type + '/rules/tree_scores/' + file_names[0] + '.scores'
    f1_fn = '../data/' + data_type + '/rules/tree_scores/' + file_names[1] + '.scores'
    all_fns.append(acc_fn)
    all_fns.append(f1_fn)
    all_fns.append(csv_fn)

    print(dt.allFnsAlreadyExist(all_fns), rewrite_files)
    if (dt.allFnsAlreadyExist(all_fns) and not rewrite_files) or save_results_so_far:
        print("Skipping task", "Weka Tree")
        return
    else:
        print("Running task", "Weka Tree")

    for l in range(len(cluster_names)):
        cluster_names[l] = cluster_names[l].split()[0]

    """
    for l in range(len(label_names)):
        if label_names[l][:6] == "class-":
            label_names[l] = label_names[l][6:]
    """

    f1_array = []
    accuracy_array = []

    labels = labels.transpose()
    print("labels transposed")
    print("labels", len(labels), len(labels[0]))

    if limit_entities is False:
        vector_names = dt.import1dArray(vector_names_fn)
        limited_labels = dt.import1dArray(limited_label_fn)
        vectors = np.asarray(dt.match_entities(vectors, limited_labels, vector_names))

    all_y_test = []
    all_predictions = []
    for l in range(len(labels)):
        if balance:
            new_vectors, new_labels = dt.balanceClasses(vectors, labels[l])
        else:
            new_vectors = vectors
            new_labels = labels[l]

        # Select training data with cross validation
        ac_y_test = []
        ac_y_train = []
        ac_x_train = []
        ac_x_test = []
        ac_y_dev = []
        ac_x_dev = []
        cv_f1 = []
        cv_acc = []
        if cv_splits == 1:
            kf = KFold(n_splits=3, shuffle=False, random_state=None)
        else:
            kf = KFold(n_splits=cv_splits, shuffle=False, random_state=None)
        c = 0
        for train, test in kf.split(new_vectors):
            if split_to_use > -1:
                if c != split_to_use:
                    c += 1
                    continue
            # The first 20% of the training fold is held out as a development set
            ac_y_test.append(new_labels[test])
            ac_y_train.append(new_labels[train[int(len(train) * 0.2):]])
            val = int(len(train) * 0.2)
            t_val = train[val:]
            nv_t_val = new_vectors[t_val]
            ac_x_train.append(nv_t_val)
            ac_x_test.append(new_vectors[test])
            ac_x_dev.append(new_vectors[train[:int(len(train) * 0.2)]])
            ac_y_dev.append(new_labels[train[:int(len(train) * 0.2)]])
            c += 1
            if cv_splits == 1:
                break

        predictions = []
        rules = []

        if development:
            ac_x_test = np.copy(np.asarray(ac_x_dev))
            ac_y_test = np.copy(np.asarray(ac_y_dev))

        train_fn = "../data/" + data_type + "/weka/data/" + filename + "Train.txt"
        test_fn = "../data/" + data_type + "/weka/data/" + filename + "Test.txt"

        for splits in range(len(ac_y_test)):
            # Get the weka predictions
            dt.writeArff(ac_x_train[splits], [ac_y_train[splits]], [label_names[splits]],
                         train_fn, header=True)
            dt.writeArff(ac_x_test[splits], [ac_y_test[splits]], [label_names[splits]],
                         test_fn, header=True)
            prediction, rule = self.getWekaPredictions(
                train_fn + label_names[splits] + ".arff",
                test_fn + label_names[splits] + ".arff", save_details, pruning)
            predictions.append(prediction)
            rules.append(rule)

        for i in range(len(predictions)):
            if len(predictions) == 1:
                all_y_test.append(ac_y_test[i])
                all_predictions.append(predictions[i])
            f1 = f1_score(ac_y_test[i], predictions[i], average="binary")
            accuracy = accuracy_score(ac_y_test[i], predictions[i])
            cv_f1.append(f1)
            cv_acc.append(accuracy)
            scores = [[label_names[l], "f1", f1, "accuracy", accuracy]]
            print(scores)

            # Export a tree for each label predicted by the clf, not sure if this is needed...
            if save_details:
                data_fn = "../data/" + data_type + "/rules/weka_rules/" + label_names[l] \
                          + " " + filename + ".txt"
                class_names = [label_names[l], "NOT " + label_names[l]]
                #self.get_code(clf, cluster_names, class_names, label_names[l] + " " + filename, data_type)
                dt.write1dArray(rules[i].split("\n"), data_fn)
                dot_file = dt.import1dArray(data_fn)
                new_dot_file = []
                for line in dot_file:
                    # Replace feature indices in the dot file with cluster names
                    if "->" not in line and "label" in line and '"t ' not in line \
                            and '"f ' not in line:
                        line = line.split('"')
                        line[1] = '"' + cluster_names[int(line[1])] + '"'
                        line = "".join(line)
                    new_dot_file.append(line)
                dt.write1dArray(new_dot_file, data_fn)
                graph = pydot.graph_from_dot_file(data_fn)
                graph.write_png("../data/" + data_type + "/rules/weka_images/"
                                + label_names[l] + " " + filename + ".png")

        f1_array.append(np.average(np.asarray(cv_f1)))
        accuracy_array.append(np.average(np.asarray(cv_acc)))

    accuracy_array = np.asarray(accuracy_array)
    accuracy_average = np.average(accuracy_array)
    accuracy_array = accuracy_array.tolist()

    f1_array = np.asarray(f1_array)
    f1_average = np.average(f1_array)
    f1_array = f1_array.tolist()

    micro_average = f1_score(np.asarray(all_y_test), np.asarray(all_predictions),
                             average="micro")
    print("Micro F1", micro_average)

    accuracy_array.append(accuracy_average)
    accuracy_array.append(0.0)
    f1_array.append(f1_average)
    f1_array.append(micro_average)

    scores = [accuracy_array, f1_array]

    dt.write1dArray(accuracy_array, acc_fn)
    dt.write1dArray(f1_array, f1_fn)

    print(csv_fn)
    if dt.fileExists(csv_fn):
        print("File exists, writing to csv")
        try:
            dt.write_to_csv(csv_fn, file_names, scores)
        except PermissionError:
            print("CSV FILE WAS OPEN, WRITING TO ANOTHER FILE")
            dt.write_to_csv(csv_fn[:len(csv_fn) - 4] + str(random.random()) + "FAIL.csv",
                            file_names, scores)
    else:
        print("File does not exist, recreating csv")
        key = []
        for l in label_names:
            key.append(l)
        key.append("AVERAGE")
        key.append("MICRO AVERAGE")
        dt.write_csv(csv_fn, file_names, scores, key)
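# getWekaPredictions shells out to Weka's J48 learner. For orientation only, the sketch
# below shows a rough scikit-learn analogue of a single train/test split; J48 (C4.5) and
# scikit-learn's CART-style trees are not identical learners, so results will differ.
def _decision_tree_sketch(x_train, y_train, x_test, max_depth=None):
    from sklearn.tree import DecisionTreeClassifier
    clf = DecisionTreeClassifier(criterion="entropy", max_depth=max_depth)
    clf.fit(x_train, y_train)
    return clf.predict(x_test)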
def __init__(self, vector_path, class_path, property_names_fn, file_name, svm_type,
             training_size=10000, lowest_count=200, highest_count=21470000, get_kappa=True,
             get_f1=True, single_class=True, data_type="movies", getting_directions=True,
             threads=1, chunk_amt=0, chunk_id=0, rewrite_files=False, classification="all",
             loc="../data/", logistic_regression=False, sparse_array_fn=None,
             only_these_fn=None):
    self.get_kappa = True
    self.get_f1 = get_f1
    self.data_type = data_type
    self.classification = classification
    self.lowest_amt = lowest_count
    self.higher_amt = highest_count

    if chunk_amt > 0:
        file_name = file_name + " CID" + str(chunk_id) + " CAMT" + str(chunk_amt)

    directions_fn = loc + data_type + "/svm/directions/" + file_name + ".txt"
    ktau_scores_fn = loc + data_type + "/svm/f1/" + file_name + ".txt"
    kappa_fn = loc + data_type + "/svm/kappa/" + file_name + ".txt"
    acc_fn = loc + data_type + "/svm/acc/" + file_name + ".txt"
    TP_fn = loc + data_type + "/svm/stats/TP " + file_name + ".txt"
    FP_fn = loc + data_type + "/svm/stats/FP " + file_name + ".txt"
    TN_fn = loc + data_type + "/svm/stats/TN " + file_name + ".txt"
    FN_fn = loc + data_type + "/svm/stats/FN " + file_name + ".txt"

    all_fns = [directions_fn, kappa_fn]
    if dt.allFnsAlreadyExist(all_fns) and not rewrite_files:
        print("Skipping task", "getSVMResults")
        return
    else:
        print("Running task", "getSVMResults")

    y_train = 0
    y_test = 0
    vectors = np.asarray(dt.import2dArray(vector_path))
    print("imported vectors")
    if not getting_directions:
        classes = np.asarray(dt.import2dArray(class_path))
        print("imported classes")
    property_names = dt.import1dArray(property_names_fn)
    print("imported property names")

    if chunk_amt > 0:
        # Only process this chunk's share of the property names
        if chunk_id == chunk_amt - 1:
            chunk = int(len(property_names) / chunk_amt)
            multiply = chunk_amt - 1
            property_names = property_names[chunk * multiply:]
        else:
            property_names = dt.chunks(property_names,
                                       int((len(property_names) / chunk_amt)))[chunk_id]

    if sparse_array_fn is not None:
        sparse_array = dt.import2dArray(sparse_array_fn)
    else:
        sparse_array = None

    if sparse_array is not None:
        # Warn about terms with at most one nonzero frequency; those classifiers will fail
        for s in range(len(sparse_array)):
            if len(np.nonzero(sparse_array[s])[0]) <= 1:
                print("WILL FAIL", s, len(np.nonzero(sparse_array[s])[0]))
            else:
                print(len(np.nonzero(sparse_array[s])[0]))

    if not getting_directions:
        x_train, x_test, y_train, y_test = train_test_split(vectors, classes, test_size=0.3,
                                                            random_state=0)
    else:
        x_train = vectors
        x_test = vectors

    if get_f1:
        y_train = y_train.transpose()
        y_test = y_test.transpose()
        print("transposed")

    self.x_train = x_train
    self.x_test = x_test
    self.y_train = y_train
    self.y_test = y_test

    if only_these_fn is not None:
        # Restrict the run to the listed property names only
        only_these = dt.import1dArray(only_these_fn, "s")
        inds = []
        for s in range(len(property_names)):
            for o in only_these:
                if property_names[s] == o:
                    inds.append(s)
                    break
        sparse_array = sparse_array[inds]
        property_names = property_names[inds]

    if self.get_f1 is False:
        print("running svms")
        kappa_scores, directions, f1_scores, property_names, accs, TPs, FPs, TNs, FNs = \
            self.runAllSVMs(y_test, y_train, property_names, file_name, svm_type,
                            getting_directions, threads, logistic_regression, sparse_array)

        dt.write1dArray(kappa_scores, kappa_fn)
        dt.write2dArray(directions, directions_fn)
        dt.write1dArray(f1_scores, ktau_scores_fn)
        dt.write1dArray(accs, acc_fn)
        dt.write1dArray(TPs, TP_fn)
        dt.write1dArray(FPs, FP_fn)
        dt.write1dArray(TNs, TN_fn)
        dt.write1dArray(FNs, FN_fn)
        dt.write1dArray(property_names, property_names_fn + file_name + ".txt")
    else:
        final_f1 = []
        final_acc = []
        for y in range(len(y_train)):
            f1, acc = self.runClassifySVM(y_test[y], y_train[y])
            print(f1, acc)
            final_f1.append(f1)
            final_acc.append(acc)
        dt.write1dArray(final_f1, ktau_scores_fn)
        dt.write1dArray(final_acc, acc_fn)
def __init__(self, class_path=None, get_scores=False, randomize_finetune_weights=False,
             dropout_noise=None, amount_of_hidden=0, epochs=1, learn_rate=0.01, loss="mse",
             batch_size=1, past_model_bias_fn=None, identity_swap=False, reg=0.0,
             amount_of_finetune=[], output_size=25, hidden_activation="tanh",
             layer_init="glorot_uniform", output_activation="tanh", deep_size=None,
             corrupt_finetune_weights=False, split_to_use=-1, hidden_layer_size=100,
             file_name="unspecified_filename", vector_path=None, is_identity=False,
             finetune_size=0, data_type="movies", optimizer_name="rmsprop", noise=0.0,
             fine_tune_weights_fn=None, past_model_weights_fn=None, from_ae=True,
             save_outputs=False, label_names_fn="", rewrite_files=False, cv_splits=1,
             cutoff_start=0.2, development=False, class_weight=None, csv_fn=None,
             tune_vals=False, get_nnet_vectors_path=None, classification_name="all",
             limit_entities=False, limited_label_fn="", vector_names_fn="",
             identity_activation="linear", loc="../data/", lock_weights_and_redo=False):
    total_file_name = loc + data_type + "/nnet/spaces/" + file_name
    weights_fn = loc + data_type + "/nnet/weights/" + file_name + "L0.txt"
    bias_fn = loc + data_type + "/nnet/bias/" + file_name + "L0.txt"
    rank_fn = loc + data_type + "/nnet/clusters/" + file_name + ".txt"

    all_fns = [weights_fn, bias_fn, rank_fn]
    if dt.allFnsAlreadyExist(all_fns) and not rewrite_files:
        print("Skipping task", "nnet")
        return
    else:
        print("Running task", "nnet")

    self.class_path = class_path
    self.learn_rate = learn_rate
    self.epochs = epochs
    self.loss = loss
    self.batch_size = batch_size
    self.hidden_activation = hidden_activation
    self.layer_init = layer_init
    self.output_activation = output_activation
    self.hidden_layer_size = hidden_layer_size
    self.file_name = file_name
    self.vector_path = vector_path
    self.dropout_noise = dropout_noise
    self.finetune_size = finetune_size
    self.get_scores = get_scores
    self.reg = reg
    self.amount_of_finetune = amount_of_finetune
    self.amount_of_hidden = amount_of_hidden
    self.output_size = output_size
    self.identity_swap = identity_swap
    self.deep_size = deep_size
    self.from_ae = from_ae
    self.is_identity = is_identity
    self.randomize_finetune_weights = randomize_finetune_weights
    self.corrupt_finetune_weights = corrupt_finetune_weights
    self.fine_tune_weights_fn = fine_tune_weights_fn
    self.identity_activation = identity_activation
    self.lock_weights_and_redo = lock_weights_and_redo

    print(data_type)

    if optimizer_name == "adagrad":
        self.optimizer = Adagrad()
    elif optimizer_name == "sgd":
        self.optimizer = SGD()
    elif optimizer_name == "rmsprop":
        self.optimizer = RMSprop()
    elif optimizer_name == "adam":
        self.optimizer = Adam()
    elif optimizer_name == "adadelta":
        self.optimizer = Adadelta()
    else:
        print("optimizer not found")
        exit()

    entity_vectors = np.asarray(dt.import2dArray(self.vector_path))
    print("Imported vectors", len(entity_vectors), len(entity_vectors[0]))

    if get_nnet_vectors_path is not None:
        nnet_vectors = np.asarray(dt.import2dArray(get_nnet_vectors_path))
        print("Imported nnet vectors", len(nnet_vectors), len(nnet_vectors[0]))

    entity_classes = np.asarray(dt.import2dArray(self.class_path))
    print("Imported classes", len(entity_classes), len(entity_classes[0]))

    if fine_tune_weights_fn is None:
        vector_names = dt.import1dArray(vector_names_fn)
        limited_labels = dt.import1dArray(limited_label_fn)
        entity_vectors = np.asarray(dt.match_entities(entity_vectors, limited_labels,
                                                      vector_names))

    if fine_tune_weights_fn is not None:
        if len(entity_vectors) != len(entity_classes):
            entity_classes = entity_classes.transpose()
            print("Transposed classes, now in form", len(entity_classes),
                  len(entity_classes[0]))
        """
        # IF Bow
        if len(entity_vectors[0]) != len(entity_classes[0]):
            entity_vectors = entity_vectors.transpose()
            print("Transposed vectors, now in form", len(entity_vectors),
                  len(entity_vectors[0]))
        """
    elif len(entity_vectors) != len(entity_classes):
        entity_vectors = entity_vectors.transpose()
        print("Transposed vectors, now in form", len(entity_vectors), len(entity_vectors[0]))

    self.input_size = len(entity_vectors[0])
    self.output_size = len(entity_classes[0])

    if fine_tune_weights_fn is not None:
        model_builder = self.fineTuneNetwork
        weights = []
        if from_ae:
            # Load the weights and biases of the pre-trained autoencoder layers
            self.past_weights = []
            past_model_weights = []
            for p in past_model_weights_fn:
                past_model_weights.append(np.asarray(dt.import2dArray(p), dtype="float64"))
            past_model_bias = []
            for p in past_model_bias_fn:
                past_model_bias.append(np.asarray(dt.import1dArray(p, "f"), dtype="float64"))
            for p in range(len(past_model_weights)):
                past_model_weights[p] = np.around(past_model_weights[p], decimals=6)
                past_model_bias[p] = np.around(past_model_bias[p], decimals=6)
            for p in range(len(past_model_weights)):
                self.past_weights.append([])
                self.past_weights[p].append(past_model_weights[p])
                self.past_weights[p].append(past_model_bias[p])
        for f in fine_tune_weights_fn:
            weights.extend(dt.import2dArray(f))

        r = np.asarray(weights, dtype="float64")
        for a in range(len(r)):
            r[a] = np.around(r[a], decimals=6)
        for a in range(len(entity_classes)):
            entity_classes[a] = np.around(entity_classes[a], decimals=6)

        # The fine-tune layer is initialised with the cluster directions as weights
        # and zero biases
        self.fine_tune_weights = []
        self.fine_tune_weights.append(r.transpose())
        self.fine_tune_weights.append(np.zeros(shape=len(r), dtype="float64"))
    else:
        model_builder = self.classifierNetwork

    models = []
    x_train = []
    y_train = []
    x_test = []
    y_test = []
    x_dev = []
    y_dev = []
    train_x_c = []
    train_y_c = []

    c = 0
    for i in range(cv_splits):
        if split_to_use > -1:
            if c != split_to_use:
                c += 1
                continue
        models.append(model_builder())
        c += 1

    # Converting labels to categorical
    f1_scores = []
    accuracy_scores = []
    f1_averages = []
    accuracy_averages = []

    if cv_splits == 1:
        k_fold = KFold(n_splits=3, shuffle=False, random_state=None)
    else:
        k_fold = KFold(n_splits=cv_splits, shuffle=False, random_state=None)
    c = 0
    for train, test in k_fold.split(entity_vectors):
        if split_to_use > -1:
            if c != split_to_use:
                c += 1
                continue
        x_train.append(entity_vectors[train[:int(len(train) * 0.8)]])
        y_train.append(entity_classes[train[:int(len(train) * 0.8)]])
        x_test.append(entity_vectors[test])
        y_test.append(entity_classes[test])
        x_dev.append(entity_vectors[train[int(len(train) * 0.8):len(train)]])
        y_dev.append(entity_classes[train[int(len(train) * 0.8):len(train)]])
        train_x_c, train_y_c = entity_vectors[train[:int(len(train) * 0.8)]], \
                               entity_classes[train[:int(len(train) * 0.8)]]
        if fine_tune_weights_fn is not None:
            # Fine-tuning uses all entities rather than a train/test split
            train_x_c = entity_vectors
            train_y_c = entity_classes
        hist = models[0].fit(train_x_c, train_y_c, nb_epoch=self.epochs,
                             batch_size=self.batch_size, verbose=1,
                             class_weight=class_weight)
        print(hist.history)
        c += 1
        if cv_splits == 1 or split_to_use == c:
            break

    if lock_weights_and_redo:
        print("REDO WITH LOCKED WEIGHTS")
        unlocked_model = Sequential()
        for l in range(0, len(models[0].layers) - 1):
            unlocked_model.add(models[0].layers[l])
        self.end_space = unlocked_model.predict(entity_vectors)
        total_file_name = loc + data_type + "/nnet/spaces/" + file_name
        dt.write2dArray(self.end_space, total_file_name + "L" + str(l) + "LSPACE" + ".txt")
        unlocked_model.add(Dense(output_dim=finetune_size, input_dim=self.hidden_layer_size,
                                 activation="linear", weights=self.fine_tune_weights))
        # unlocked_model.compile(loss=self.loss, optimizer=self.optimizer)
        models[0] = unlocked_model
        hist = models[0].fit(train_x_c, train_y_c, nb_epoch=self.epochs,
                             batch_size=self.batch_size, verbose=1,
                             class_weight=class_weight)

    original_fn = file_name
    for m in range(len(models)):
        if development:
            x_test[m] = x_dev[m]
            y_test[m] = y_dev[m]
        if get_scores:
            vals_to_try = np.arange(start=cutoff_start, stop=1, step=0.01)
            test_pred = models[m].predict(x_train[m]).transpose()
            print(test_pred)
            y_train_m = np.asarray(y_train[m]).transpose()
            highest_f1 = [0] * len(test_pred)
            highest_vals = [0.2] * len(test_pred)
            if tune_vals:
                # Tune a per-class cutoff on the training predictions, maximising the
                # average of F1 and accuracy
                for c in range(len(test_pred)):
                    for val in vals_to_try:
                        test_pred_c = np.copy(test_pred[c])
                        test_pred_c[test_pred_c >= val] = 1
                        test_pred_c[test_pred_c < val] = 0
                        acc = accuracy_score(y_train_m[c], test_pred_c)
                        f1 = f1_score(y_train_m[c], test_pred_c, average="binary")
                        f1 = (f1 + acc) / 2
                        if f1 > highest_f1[c]:
                            highest_f1[c] = f1
                            highest_vals[c] = val
            print("optimal f1s", highest_f1)
            print("optimal vals", highest_vals)

            y_pred = models[m].predict(x_test[m]).transpose()
            y_test[m] = np.asarray(y_test[m]).transpose()
            for y in range(len(y_pred)):
                y_pred[y][y_pred[y] >= highest_vals[y]] = 1
                y_pred[y][y_pred[y] < highest_vals[y]] = 0

            f1_array = []
            accuracy_array = []
            for y in range(len(y_pred)):
                accuracy_array.append(accuracy_score(y_test[m][y], y_pred[y]))
                f1_array.append(f1_score(y_test[m][y], y_pred[y], average="binary"))
                print(f1_array[y])

            y_pred = y_pred.transpose()
            y_test[m] = np.asarray(y_test[m]).transpose()
            micro_average = f1_score(y_test[m], y_pred, average="micro")

            cv_f1_fn = loc + data_type + "/nnet/scores/F1 " + file_name + ".txt"
            cv_acc_fn = loc + data_type + "/nnet/scores/ACC " + file_name + ".txt"
            dt.write1dArray(f1_array, cv_f1_fn)
            dt.write1dArray(accuracy_array, cv_acc_fn)
            f1_scores.append(f1_array)
            accuracy_scores.append(accuracy_array)
            f1_average = np.average(f1_array)
            accuracy_average = np.average(accuracy_array)
            f1_averages.append(f1_average)
            accuracy_averages.append(accuracy_average)

            print("Average F1 Binary", f1_average, "Acc", accuracy_average)
            print("Micro Average F1", micro_average)

            f1_array.append(f1_average)
            f1_array.append(micro_average)
            accuracy_array.append(accuracy_average)
            accuracy_array.append(0.0)

            scores = [accuracy_array, f1_array]
            csv_fn = loc + data_type + "/nnet/csv/" + csv_fn + ".csv"
            file_names = [file_name + "ACC", file_name + "F1"]
            label_names = dt.import1dArray(label_names_fn)
            if dt.fileExists(csv_fn):
                print("File exists, writing to csv")
                try:
                    dt.write_to_csv(csv_fn, file_names, scores)
                except PermissionError:
                    print("CSV FILE WAS OPEN, WRITING TO ANOTHER FILE")
                    dt.write_to_csv(csv_fn[:len(csv_fn) - 4] + str(random.random())
                                    + "FAIL.csv", [file_name], scores)
            else:
                print("File does not exist, recreating csv")
                key = []
                for l in label_names:
                    key.append(l)
                key.append("AVERAGE")
                key.append("MICRO AVERAGE")
                dt.write_csv(csv_fn, file_names, scores, key)

        if save_outputs:
            if limit_entities is False:
                self.output_clusters = models[m].predict(nnet_vectors)
            else:
                self.output_clusters = models[m].predict(entity_vectors)
            self.output_clusters = self.output_clusters.transpose()
            dt.write2dArray(self.output_clusters, rank_fn)

        for l in range(0, len(models[m].layers) - 1):
            if dropout_noise is not None and dropout_noise > 0.0:
                if l % 2 == 1:
                    continue
            print("Writing", l, "layer")
            # Predict with the network truncated at layer l to get that layer's space
            truncated_model = Sequential()
            for a in range(l + 1):
                truncated_model.add(models[m].layers[a])
            truncated_model.compile(loss=self.loss, optimizer="sgd")
            if get_nnet_vectors_path is not None:
                self.end_space = truncated_model.predict(nnet_vectors)
            else:
                self.end_space = truncated_model.predict(entity_vectors)
            total_file_name = loc + data_type + "/nnet/spaces/" + file_name
            dt.write2dArray(self.end_space, total_file_name + "L" + str(l) + ".txt")

        for l in range(len(models[m].layers)):
            try:
                dt.write2dArray(models[m].layers[l].get_weights()[0],
                                loc + data_type + "/nnet/weights/" + file_name + "L"
                                + str(l) + ".txt")
                dt.write1dArray(models[m].layers[l].get_weights()[1],
                                loc + data_type + "/nnet/bias/" + file_name + "L"
                                + str(l) + ".txt")
            except IndexError:
                print("Layer", str(l), "Failed")

    if cv_splits > 1:
        class_f1_averages = []
        class_accuracy_averages = []
        f1_scores = np.asarray(f1_scores).transpose()
        accuracy_scores = np.asarray(accuracy_scores).transpose()
        for c in range(len(f1_scores)):
            class_f1_averages.append(np.average(f1_scores[c]))
            class_accuracy_averages.append(np.average(accuracy_scores[c]))
        f1_fn = loc + data_type + "/nnet/scores/F1 " + file_name + ".txt"
        acc_fn = loc + data_type + "/nnet/scores/ACC " + file_name + ".txt"
        dt.write1dArray(class_f1_averages, f1_fn)
        dt.write1dArray(class_accuracy_averages, acc_fn)

    overall_f1_average = np.average(f1_averages)
    overall_accuracy_average = np.average(accuracy_averages)
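# fineTuneNetwork is not shown in this excerpt. The sketch below is a minimal guess at the
# architecture it builds: a hidden layer initialised from the autoencoder weights followed
# by an output layer initialised from the cluster directions. It uses the same Keras 1.x
# style calls as the code above (output_dim/input_dim/weights); the helper name, layer
# sizes and compile settings are illustrative only, and the real builder may differ.
def _finetune_network_sketch(input_size, hidden_size, past_weights, fine_tune_weights,
                             hidden_activation="tanh", identity_activation="linear"):
    from keras.models import Sequential
    from keras.layers import Dense
    model = Sequential()
    # Hidden layer initialised with the pre-trained autoencoder weights [W, b]
    model.add(Dense(output_dim=hidden_size, input_dim=input_size,
                    activation=hidden_activation, weights=past_weights[0]))
    # Output layer initialised with the cluster directions [W, b]; one unit per cluster
    model.add(Dense(output_dim=len(fine_tune_weights[1]), input_dim=hidden_size,
                    activation=identity_activation, weights=fine_tune_weights))
    model.compile(loss="mse", optimizer="rmsprop")
    return model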
def initClustering(vector_fn, directions_fn, scores_fn, names_fn, amt_to_start, profiling,
                   max_clusters, score_limit, file_name, score_type, similarity_threshold,
                   add_all_terms=False, data_type="movies", largest_clusters=1,
                   rewrite_files=False, lowest_amt=0, highest_amt=0, classification="genres",
                   min_score=0, min_size=1, dissim=0.0, dissim_amt=0, find_most_similar=False,
                   get_all=False, half_ndcg_half_kappa="", only_most_similar=False,
                   dont_cluster=0):
    output_directions_fn = "../data/" + data_type + "/cluster/hierarchy_directions/" \
                           + file_name + ".txt"
    output_names_fn = "../data/" + data_type + "/cluster/hierarchy_names/" + file_name + ".txt"
    all_directions_fn = "../data/" + data_type + "/cluster/all_directions/" + file_name + ".txt"
    all_names_fn = "../data/" + data_type + "/cluster/all_names/" + file_name + ".txt"

    all_fns = [output_directions_fn, output_names_fn, all_directions_fn, all_names_fn]
    if dt.allFnsAlreadyExist(all_fns) and not rewrite_files:
        print("Skipping task", initClustering.__name__)
        return
    else:
        print("Running task", initClustering.__name__)

    vectors = dt.import2dArray(vector_fn)
    directions = dt.import2dArray(directions_fn)
    scores = dt.import1dArray(scores_fn, "f")
    names = dt.import1dArray(names_fn)

    # Interleaved marker for the two score types (Kappa = 0, NDCG = 1)
    type1 = np.ones(int(len(names) / 2))
    type2 = np.zeros(int(len(names) / 2))
    shuffle_ind = np.asarray(list(range(0, len(type1))))
    type = np.insert(type1, shuffle_ind, type2)

    if len(half_ndcg_half_kappa) > 0:
        kappa_scores = dt.import1dArray(half_ndcg_half_kappa, "f")

    if amt_to_start > 0:
        if len(half_ndcg_half_kappa) == 0:
            # Top X scoring directions
            ind = np.flipud(np.argsort(scores))[:amt_to_start]
        else:
            # Half of the directions by the main score, half by the kappa score
            ind1 = np.flipud(np.argsort(scores))[:int(amt_to_start / 2)]
            ind2 = np.zeros(len(ind1), dtype="int")
            kappa_scores = np.flipud(np.argsort(kappa_scores))
            count = 0
            added = 0
            for i in kappa_scores:
                if i not in ind1:
                    ind2[added] = i
                    added += 1
                    if added >= amt_to_start / 2:
                        break
                count += 1
            shuffle_ind = np.asarray(list(range(0, len(ind1))))
            ind = np.insert(ind1, shuffle_ind, ind2)
    else:
        ind = np.flipud(np.argsort(scores))
        ind = [i for i in ind if scores[i] > min_score]

    top_directions = []
    top_scores = []
    top_names = []
    for i in ind:
        top_directions.append(directions[i])
        top_names.append(names[i])
        top_scores.append(scores[i])

    if profiling:
        cProfile.runctx('getBreakOffClusters(vectors, top_directions, top_scores, top_names, score_limit, \
            max_clusters, file_name, score_type, similarity_threshold, add_all_terms, data_type, \
            largest_clusters, rewrite_files=rewrite_files, lowest_amt=lowest_amt, highest_amt=highest_amt, \
            classification=classification, min_size=min_size, dissim=dissim, dissim_amt=dissim_amt, \
            find_most_similar=find_most_similar, get_all=get_all, half_ndcg_half_kappa=type)',
                        globals(), locals())
    else:
        getBreakOffClusters(vectors, top_directions, top_scores, top_names, score_limit,
                            max_clusters, file_name, score_type, similarity_threshold,
                            add_all_terms, data_type, largest_clusters,
                            rewrite_files=rewrite_files, lowest_amt=lowest_amt,
                            highest_amt=highest_amt, classification=classification,
                            min_size=min_size, dissim=dissim, dissim_amt=dissim_amt,
                            find_most_similar=find_most_similar, get_all=get_all,
                            half_ndcg_half_kappa=type, only_most_similar=only_most_similar,
                            dont_cluster=dont_cluster)