def genetic_algorithm(args):
    minmax = (args.min, args.max)
    N = args.N
    gens = args.gens
    solution = common.initialize(N, minmax)
    best_fitness = common.fitness(solution[0])
    for gen in range(1, gens):
        if gen % 10 == 0:
            print("Generation :#%d" % gen)
        mutated = mutation.ga_mutation(solution, minmax)
        fitness = common.fitness(mutated[0])
        if fitness <= best_fitness:
            best_fitness = fitness
            solution = mutated
            common.write_data(gen, fitness, 'ga.dat')
        if fitness == 0:
            break
    print("#########################")
    print("# Strategy : Genetic Algorithms")
    print("# Generations : " + str(gens))
    print("# Best Solution Fitness : %.3f" % best_fitness)
    print("# Log File : ./ga.dat")
    print("# Graph : Genetic_Algorithm_Ackleys_Function.png")
    print("#########################")
    common.plot('Genetic Algorithm: Ackleys Function', 'ga.dat')
def evolutionary_strategies(args):
    minmax = (args.min, args.max)
    N = args.N
    gens = args.gens
    solution = common.initialize(N, minmax)
    best_fitness = common.fitness(solution[0])
    p = 1.5
    for gen in range(1, gens):
        if gen % 10 == 0:
            print("Generation :#%d" % gen)
        mutated = mutation.es_mutation(solution, minmax, p)
        #print(solution[0])
        #print(mutated[0])
        #print(list(map(operator.sub, mutated[0], solution[0])))
        fitness = common.fitness(mutated[0])
        if fitness <= best_fitness:
            best_fitness = fitness
            solution = mutated
            p = 1.5
            common.write_data(gen, fitness, 'es.dat')
        #elif fitness == best_fitness:
        #    p = 1
        else:
            p = 1.5 ** (-1 / 4)
        if fitness == 0:
            break
    print("#########################")
    print("# Strategy : Evolutionary Strategies")
    print("# Generations : " + str(gens))
    print("# Best Solution Fitness : %.3f" % best_fitness)
    print("# Log File : ./es.dat")
    print("# Graph : Evolutionary_Strategies_Ackleys_Function.png")
    print("#########################")
    common.plot('Evolutionary Strategies: Ackleys Function', 'es.dat')
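# Added sketch, not part of the original module: the step-size update above
# (reset p to 1.5 on improvement, multiply by 1.5 ** (-1/4) otherwise)
# resembles Rechenberg's 1/5th-success-rule adaptation. Both routines assume
# common.fitness evaluates Ackley's function; a minimal, self-contained
# version of the standard d-dimensional Ackley function (a=20, b=0.2,
# c=2*pi, global minimum 0 at the origin) could look like this.
import math

def ackley(x, a=20.0, b=0.2, c=2.0 * math.pi):
    d = len(x)
    mean_sq = sum(xi * xi for xi in x) / d        # mean squared component
    mean_cos = sum(math.cos(c * xi) for xi in x) / d
    return -a * math.exp(-b * math.sqrt(mean_sq)) - math.exp(mean_cos) + a + math.e

# ackley([0.0, 0.0]) == 0.0, which is why "fitness == 0" terminates the runs.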
clf_r.append(fit_kmp(X_tr, y_tr, X_te, y_te, "random", opts, random_state=0))
clf_b.append(fit_kmp(X_tr, y_tr, X_te, y_te, "balanced", opts, random_state=0))
clf_s.append(fit_kmp(X_tr, y_tr, X_te, y_te, "stratified", opts, random_state=0))

rs = np.vstack([clf.validation_scores_ for clf in clf_r])
bs = np.vstack([clf.validation_scores_ for clf in clf_b])
ss = np.vstack([clf.validation_scores_ for clf in clf_s])

pl.figure()
error_bar = len(args) == 2
plot(pl, clf_r[0].iterations_, rs.mean(axis=0), rs.std(axis=0), "Random", error_bar)
plot(pl, clf_b[0].iterations_, bs.mean(axis=0), bs.std(axis=0), "Balanced", error_bar)
plot(pl, clf_s[0].iterations_, ss.mean(axis=0), ss.std(axis=0), "Stratified", error_bar)
pl.xlabel('Iteration')
pl.ylabel('Accuracy')
pl.legend(loc='lower right')
pl.show()
def show(columns, figure, start, end):
    col = np.array(columns[6][1:]).astype(int)
    row = np.array(columns[7][1:]).astype(int)
    jfiltered1 = np.array(columns[2][1:]).astype(int)
    jfiltered2 = np.array(columns[3][1:]).astype(int)
    raw1 = np.array(columns[0][1:]).astype(int)
    raw2 = np.array(columns[1][1:]).astype(int)

    [filtered1, levels1, markers11, markers12, blink_points1], \
        [filtered2, levels2, markers21, markers22, blink_points2] = process(raw1, raw2)

    raw1 = scipy.signal.detrend(raw1)
    raw2 = scipy.signal.detrend(raw2)
    raw1 = raw1[start:end]
    raw2 = raw2[start:end]

    # Shift blink indices into the [start, end) window; each channel uses its
    # own blink points.
    blink_points1 = [i - start if start <= i < end else 0 for i in blink_points1]
    blink_points2 = [i - start if start <= i < end else 0 for i in blink_points2]
    blink_values1 = [filtered1[i + start] for i in blink_points1]
    blink_values2 = [filtered2[i + start] for i in blink_points2]

    markers11 = markers11[start:end]
    markers21 = markers21[start:end]
    markers12 = markers12[start:end]
    markers22 = markers22[start:end]
    channel1 = filtered1[start:end]
    channel2 = filtered2[start:end]
    jfiltered1 = jfiltered1[start:end]
    jfiltered2 = jfiltered2[start:end]
    row = [4 - x for x in row[start:end]]
    col = [4 - x for x in col[start:end]]
    levels1 = levels1[start:end]
    levels2 = levels2[start:end]

    print('Accuracy for Horizontal is %.2f%% and Vertical is %.2f%%'
          % (get_accuracy(levels1, col), get_accuracy(levels2, row)))

    # plot(figure, 211, jfiltered1, 'lightblue', window=len(jfiltered1))
    plot(figure, 211, channel1, 'blue', window=len(channel1))
    # plot(figure, 211, markers11, 'yellow', window=len(markers11), twin=True)
    # plot(figure, 211, markers12, 'orange', window=len(markers12), twin=True)
    plot(figure, 211, blink_values1, 'red', x=blink_points1, window=len(channel1))
    # plot(figure, 211, col, 'orange', window=len(col), twin=True)
    # plot(figure, 211, levels1, 'lightblue', window=len(levels1), twin=True)
    plot(figure, 211, raw1, 'lightblue', window=len(raw1), twin=True)
    # plot(figure, 212, jfiltered2, 'lightgreen', window=len(jfiltered2))
    plot(figure, 212, channel2, 'green', window=len(channel2))
    # plot(figure, 212, markers21, 'yellow', window=len(markers21), twin=True)
    # plot(figure, 212, markers22, 'orange', window=len(markers22), twin=True)
    plot(figure, 212, blink_values2, 'red', x=blink_points2, window=len(channel2))
    # plot(figure, 212, row, 'orange', window=len(row), twin=True)
    # plot(figure, 212, levels2, 'lightgreen', window=len(levels2), twin=True)
    plot(figure, 212, raw2, 'lightgreen', window=len(raw2), twin=True)
import numpy as np
import kmeans
import common
import naive_em
import em

X = np.loadtxt("toy_data.txt")

K = 4
seeds = [0, 1, 2, 3, 4]
for seed in seeds:
    mixture, post = common.init(X, K, seed)
    # kmixture, kpost, kcost = kmeans.run(X, mixture, post)
    # title = f"K is {K}, seed is {seed}, cost is {kcost}"
    em_mixture, em_post, em_cost = naive_em.run(X, mixture, post)
    with_bic = common.bic(X, em_mixture, em_cost)
    title = f"K is {K}, seed is {seed}, em_cost is {em_cost}, with_bic is {with_bic}"
    print(title)
    common.plot(X, em_mixture, em_post, title)

# TODO: Your code here
def signalFilter(self, data_dict):
    self.filter_bar_text.setVisible(True)
    self.filter_bar.setVisible(True)
    self.filter_bar.setValue(0)
    self.filter_btn.setVisible(True)
    # ---------------------- GUI code above; the filtering algorithm follows.
    for j in range(len(data_dict)):  # the data-dict array holds 40 dicts
        # EEG channels
        for i in range(32):
            EEG = data_dict[j]["EEG" + str(i)]  # recover the list from the dict
            EEG = EEGFilter(EEG)
            data_dict[j]["EEG" + str(i)] = EEG
            if self.is_save_file:
                plot(EEG, r"E:\result\Signalclear\EEG\\", "filter_EEG_" + str(j) + "_" + str(i))
        # Eye-movement (EOG) signals -- 32: EOGh, 33: EOGv
        EOGh = data_dict[j]['EOGh']
        EOGh = EOGhFilter(EOGh)
        data_dict[j]['EOGh'] = EOGh
        if self.is_save_file:
            plot(EOGh, r"E:\result\Signalclear\EOGh\\", "filter_EOGh_" + str(j))
        EOGv = data_dict[j]['EOGv']
        EOGv = EOGvFilter(EOGv)
        data_dict[j]['EOGv'] = EOGv
        if self.is_save_file:
            plot(EOGv, r"E:\result\Signalclear\EOGv\\", "filter_EOGv_" + str(j))
        # EMG signals -- 34: EMGz (zygomaticus), 35: EMGt (trapezius)
        EMGz = data_dict[j]['EMGz']
        EMGz = EMGzFilter(EMGz)
        data_dict[j]['EMGz'] = EMGz
        if self.is_save_file:
            plot(EMGz, r"E:\result\Signalclear\EMGz\\", "filter_EMGz_" + str(j))
        EMGt = data_dict[j]['EMGt']
        EMGt = EMGtFilter(EMGt)
        data_dict[j]['EMGt'] = EMGt
        if self.is_save_file:
            plot(EMGt, r"E:\result\Signalclear\EMGt\\", "filter_EMGt_" + str(j))
        # Galvanic skin response, GSR (channel 36)
        GSR = data_dict[j]['GSR']
        GSR = GSRFilter(GSR)
        data_dict[j]['GSR'] = GSR
        if self.is_save_file:
            plot(GSR, r"E:\result\Signalclear\GSR\\", "filter_GSR_" + str(j))
        # Respiration (RSP)
        RSP = data_dict[j]['RSP']
        RSP = RSPFilter(RSP)
        data_dict[j]['RSP'] = RSP
        if self.is_save_file:
            plot(RSP, r"E:\result\Signalclear\RSP\\", "filter_RSP_" + str(j))
        # Photoplethysmogram (PPG)
        PPG = data_dict[j]['PPG']
        PPG = PPGFilter(PPG)
        data_dict[j]['PPG'] = PPG
        if self.is_save_file:
            plot(PPG, r"E:\result\Signalclear\PPG\\", "filter_PPG_" + str(j))
        # Skin temperature (SKT)
        SKT = data_dict[j]['SKT']
        SKT = SKTFilter(SKT)
        data_dict[j]['SKT'] = SKT
        if self.is_save_file:
            plot(SKT, r"E:\result\Signalclear\SKT\\", "filter_SKT_" + str(j))
        self.filter_bar.setValue((j + 1.0) / len(data_dict) * 100)  # progress bar
    data_dict_clear = data_dict
    return data_dict_clear  # return the array of data dicts
                 requires_grad=True, dtype=torch.float32).cuda()
torch_out = model(x)
torch.onnx.export(model, x, "edsr.onnx", export_params=True,
                  input_names=['LR'], output_names=['PRED'])

input = torch.from_numpy(np.ones([3, IMAGE_SIZE, IMAGE_SIZE]))
input = input.type(torch.float)
input = input.view([-1, 3, IMAGE_SIZE, IMAGE_SIZE]).cuda()

it, times = [], []
total_time = 0
for i in range(ITERATION):
    t1 = time.time()
    pred = model(input)
    t2 = time.time()
    total_time += t2 - t1
    it.append(i)
    times.append(t2 - t1)

plot(it, times, 'torch.png',
     'Pytorch {} inference avg: {:.4f}'.format(IMAGE_SIZE, total_time / ITERATION),
     'Iteration', 'time', ['pytorch'])
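# Added sketch (not in the original script): one way to sanity-check the
# exported edsr.onnx against the PyTorch output, assuming onnxruntime is
# available; the 'LR'/'PRED' names match the export call above.
import onnxruntime as ort

sess = ort.InferenceSession("edsr.onnx", providers=["CPUExecutionProvider"])
onnx_out = sess.run(["PRED"], {"LR": x.detach().cpu().numpy()})[0]
np.testing.assert_allclose(torch_out.detach().cpu().numpy(), onnx_out,
                           rtol=1e-3, atol=1e-5)  # loose fp32 tolerances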
if perc_label == 1.0:
    acc_semi[i, j] = acc_sup[i, j]
else:
    clf = fit_kmp(X_l, y_l, X_te, y_te, X_all, opts, j)
    #acc_semi[i, j] = clf.validation_scores_[-1]
    acc_semi[i, j] = clf.best_score_
j += 1

error_bar = len(args) == 2

# 2-d plot
pl.figure()
plot(pl, amounts, acc_sup.mean(axis=1), acc_sup.std(axis=1), "Supervised", error_bar)
plot(pl, amounts, acc_semi.mean(axis=1), acc_semi.std(axis=1), "Semi-supervised", error_bar)
pl.xlabel('Percentage of labeled data')
if opts.regression:
    pl.ylabel('MSE')
    pl.legend(loc='upper right')
else:
    pl.ylabel('Accuracy')
    pl.legend(loc='lower right')
pl.show()
import numpy as np
from bs4 import BeautifulSoup

import common


def parseKML(inputfile):
    with open(inputfile, "r") as f:
        soup = BeautifulSoup(f, features="html.parser")
    # Each <coordinates> node holds whitespace-separated "lon,lat[,alt]" triples.
    return [np.array([i.split(",")[0:2] for i in node.string.split()]).astype(float).tolist()
            for node in soup.findAll("coordinates")]


if __name__ == "__main__":
    import getNDVI
    for array in parseKML("2017_polygons.kml"):
        ndvi = getNDVI.arrayToNDVI(array, "2017-05-01", "2017-09-30",
                                   returnDates=True, CLOUDY_PIXEL_PERCENTAGE=100)
        for i in ndvi[1]:
            print(i, end=' ')
        print()
        common.plot(ndvi[0])
print("Simulation time: %fms" % ((sim_end_time - sim_start_time) * 1000.0)) if not params["use_genn_recording"]: start_timesteps = np.arange(0.0, params["record_time_ms"], params["timestep_ms"]) end_timesteps = np.arange(params["duration_ms"] - params["record_time_ms"], params["duration_ms"], params["timestep_ms"]) start_exc_spikes = convert_spikes(start_exc_spikes, start_timesteps) start_inh_spikes = convert_spikes(start_inh_spikes, start_timesteps) end_exc_spikes = convert_spikes(end_exc_spikes, end_timesteps) end_inh_spikes = convert_spikes(end_inh_spikes, end_timesteps) if params["measure_timing"]: print("\tInit:%f" % (1000.0 * model.init_time)) print("\tSparse init:%f" % (1000.0 * model.init_sparse_time)) print("\tNeuron simulation:%f" % (1000.0 * model.neuron_update_time)) print("\tPresynaptic update:%f" % (1000.0 * model.presynaptic_update_time)) print("\tPostsynaptic update:%f" % (1000.0 * model.postsynaptic_update_time)) # ---------------------------------------------------------------------------- # Plotting # ---------------------------------------------------------------------------- plot(start_exc_spikes, start_inh_spikes, end_exc_spikes, end_inh_spikes, start_stimulus_times, start_reward_times, end_stimulus_times, end_reward_times, 2000.0, params) # Show plot plt.show()
_htitle = ';' + _plotConfig.titleX + ';' + _plotConfig.titleY

## plot
plot(**{
    'histograms': _plotConfig.hists,
    'title': _htitle,
    'labels': _labels,
    'legXY': [
        Lef + (1 - Rig - Lef) * 0., (1 - Top) + Top * 0.10,
        Lef + (1 - Rig - Lef) * 1., (1 - Top) + Top * 0.9
    ],
    'outputs': [OUTDIR + '/' + _plotConfig.outputName + '.' + _tmp for _tmp in EXTS],
    'ratio': _plotConfig.ratio,
    'logY': _plotConfig.logY,
    'autoRangeX': _plotConfig.autoRangeX,
})

del _plotConfig
def test_sentiment_analysis_classification(n_estimators, C):
    train, test = setup(dataset_path=OBJ_SUB_PATH, pos_tag_path=OBJ_SUB_POS_TAGGING_PATH)
    print('===============================')
    print('Test sentiment analysis:')
    random_forest_accs, svm_accs, selected_features = test_sentiment_analysis(
        train, test, n_estimators=n_estimators, C=C)

    random_forest_max_acc_idx, random_forest_max_ppv_idx, random_forest_max_npv_idx = \
        common.max_accuracy(random_forest_accs)
    print('Random Forest: Max acc: {}: {}, Max ppv: {}: {}, Max npv: {}: {}'.format(
        random_forest_max_acc_idx, random_forest_accs[random_forest_max_acc_idx].acc,
        random_forest_max_ppv_idx, random_forest_accs[random_forest_max_ppv_idx].ppv,
        random_forest_max_npv_idx, random_forest_accs[random_forest_max_npv_idx].npv,
    ))
    print('Random Forest Best {} features: {}'.format(
        random_forest_max_acc_idx + 1,
        ', '.join(common.best_feature_names(
            named_features, 'sentiment_analysis',
            selected_features[random_forest_max_acc_idx]))))

    svm_max_acc_idx, svm_max_ppv_idx, svm_max_npv_idx = common.max_accuracy(svm_accs)
    print('SVM: Max acc: {}: {}, Max ppv: {}: {}, Max npv: {}: {}'.format(
        svm_max_acc_idx, svm_accs[svm_max_acc_idx].acc,
        svm_max_ppv_idx, svm_accs[svm_max_ppv_idx].ppv,
        svm_max_npv_idx, svm_accs[svm_max_npv_idx].npv,
    ))
    print('SVM Best {} features: {}'.format(
        svm_max_acc_idx + 1,
        ', '.join(common.best_feature_names(
            named_features, 'sentiment_analysis',
            selected_features[svm_max_acc_idx]))))

    common.plot(
        xs=[[i + 1 for i in range(len(random_forest_accs))] for _ in range(3)],
        ys=[
            [acc.acc for acc in random_forest_accs],
            [acc.ppv for acc in random_forest_accs],
            [acc.npv for acc in random_forest_accs],
        ],
        colors=['bs-', 'gs-', 'rs-'],
        x_label='#features',
        y_label='accuracy',
        func_labels=['accuracy', 'ppv', 'npv'],
        title='Random Forest (#estimators={})'.format(n_estimators),
        save=os.path.join(GRAPHS_DIR, 'SentimentAnalysis', 'random_forest.png'))
    common.plot(
        xs=[[i + 1 for i in range(len(svm_accs))] for _ in range(3)],
        ys=[
            [acc.acc for acc in svm_accs],
            [acc.ppv for acc in svm_accs],
            [acc.npv for acc in svm_accs],
        ],
        colors=['bs-', 'gs-', 'rs-'],
        x_label='#features',
        y_label='accuracy',
        func_labels=['accuracy', 'ppv', 'npv'],
        title='SVM (C={})'.format(C),
        save=os.path.join(GRAPHS_DIR, 'SentimentAnalysis', 'SVM.png'))

    num_of_features_rf = sorted(set([
        random_forest_max_acc_idx, random_forest_max_ppv_idx, random_forest_max_npv_idx]))
    num_of_features_svm = list(set([svm_max_acc_idx, svm_max_ppv_idx, svm_max_npv_idx]))

    best_acc_results = []
    for x in num_of_features_rf:
        best_acc_results.append(round(random_forest_accs[x].acc, 3))
    for x in num_of_features_svm:
        best_acc_results.append(round(svm_accs[x].acc, 3))
    best_ppv_results = []
    for x in num_of_features_rf:
        best_ppv_results.append(round(random_forest_accs[x].ppv, 3))
    for x in num_of_features_svm:
        best_ppv_results.append(round(svm_accs[x].ppv, 3))
    best_npv_results = []
    for x in num_of_features_rf:
        best_npv_results.append(round(random_forest_accs[x].npv, 3))
    for x in num_of_features_svm:
        best_npv_results.append(round(svm_accs[x].npv, 3))
    best_results = [best_acc_results, best_ppv_results, best_npv_results]

    common.plot_table(
        title='Best Results',
        cells=best_results,
        column_names=['RF ({})'.format(x + 1) for x in num_of_features_rf] +
                     ['SVM ({})'.format(x + 1) for x in num_of_features_svm],
        row_names=['accuracy', 'ppv', 'npv'],
        save=os.path.join(GRAPHS_DIR, 'SentimentAnalysis', 'best_result_table.png'),
    )
def test_disaster_classification(n_estimators, Cs):
    train, test = setup()
    train_corpus = numpy.array([tweet.text for tweet in train])
    test_corpus = numpy.array([tweet.text for tweet in test])
    train_labels = numpy.array([tweet.label for tweet in train])
    test_labels = numpy.array([tweet.label for tweet in test])

    print('===============================')
    print('Test unigrams:')
    uni_random_forest_accuracies, uni_naive_bayes_accuracy = test_bag_of_words(
        train_corpus, test_corpus, train_labels, test_labels, n_estimators)
    print('===============================')
    print('Test unigrams and bigrams:')
    bi_random_forest_accuracies, bi_naive_bayes_accuracy = test_bag_of_words(
        train_corpus, test_corpus, train_labels, test_labels, n_estimators,
        ngram_range=(1, 2))

    forest_uni_max_acc_idx, forest_uni_max_ppv_idx, forest_uni_max_npv_idx = \
        common.max_accuracy(uni_random_forest_accuracies)
    print('Forest uni: Max acc: {}: {}, Max ppv: {}: {}, Max npv: {}: {}'.format(
        forest_uni_max_acc_idx, uni_random_forest_accuracies[forest_uni_max_acc_idx].acc,
        forest_uni_max_ppv_idx, uni_random_forest_accuracies[forest_uni_max_ppv_idx].ppv,
        forest_uni_max_npv_idx, uni_random_forest_accuracies[forest_uni_max_npv_idx].npv,
    ))
    forest_bi_max_acc_idx, forest_bi_max_ppv_idx, forest_bi_max_npv_idx = \
        common.max_accuracy(bi_random_forest_accuracies)
    print('Forest bi: Max acc: {}: {}, Max ppv: {}: {}, Max npv: {}: {}'.format(
        forest_bi_max_acc_idx, bi_random_forest_accuracies[forest_bi_max_acc_idx].acc,
        forest_bi_max_ppv_idx, bi_random_forest_accuracies[forest_bi_max_ppv_idx].ppv,
        forest_bi_max_npv_idx, bi_random_forest_accuracies[forest_bi_max_npv_idx].npv,
    ))

    log_n_estimators = numpy.log2(n_estimators)
    common.plot(
        xs=[log_n_estimators for _ in range(6)],
        ys=[
            [acc.acc for acc in uni_random_forest_accuracies],
            [acc.ppv for acc in uni_random_forest_accuracies],
            [acc.npv for acc in uni_random_forest_accuracies],
            [acc.acc for acc in bi_random_forest_accuracies],
            [acc.ppv for acc in bi_random_forest_accuracies],
            [acc.npv for acc in bi_random_forest_accuracies],
        ],
        colors=['bs-', 'gs-', 'rs-', 'bo-', 'go-', 'ro-'],
        x_label='#estimators (log2)',
        y_label='accuracy',
        func_labels=[
            'unigram accuracy', 'unigram ppv', 'unigram npv',
            'bigram accuracy', 'bigram ppv', 'bigram npv',
        ],
        title='Random Forest',
        save=os.path.join(GRAPHS_DIR, 'DisasterClassification',
                          'random_forest_unigram_vs_bigram_features.png'))

    print('===============================')
    print('Test SVM unigrams and bigrams:')
    svm_uni_accs, svm_bi_accs, svm_uni_pos_accs, svm_bi_pos_accs = test_svm(train, test, Cs)

    svm_uni_max_acc_idx, svm_uni_max_ppv_idx, svm_uni_max_npv_idx = \
        common.max_accuracy(svm_uni_accs)
    print('SVM uni: Max acc: {}: {}, Max ppv: {}: {}, Max npv: {}: {}'.format(
        svm_uni_max_acc_idx, svm_uni_accs[svm_uni_max_acc_idx].acc,
        svm_uni_max_ppv_idx, svm_uni_accs[svm_uni_max_ppv_idx].ppv,
        svm_uni_max_npv_idx, svm_uni_accs[svm_uni_max_npv_idx].npv,
    ))
    svm_uni_pos_max_acc_idx, svm_uni_pos_max_ppv_idx, svm_uni_pos_max_npv_idx = \
        common.max_accuracy(svm_uni_pos_accs)
    print('SVM uni pos: Max acc: {}: {}, Max ppv: {}: {}, Max npv: {}: {}'.format(
        svm_uni_pos_max_acc_idx, svm_uni_pos_accs[svm_uni_pos_max_acc_idx].acc,
        svm_uni_pos_max_ppv_idx, svm_uni_pos_accs[svm_uni_pos_max_ppv_idx].ppv,
        svm_uni_pos_max_npv_idx, svm_uni_pos_accs[svm_uni_pos_max_npv_idx].npv,
    ))
    svm_bi_max_acc_idx, svm_bi_max_ppv_idx, svm_bi_max_npv_idx = \
        common.max_accuracy(svm_bi_accs)
    print('SVM bi: Max acc: {}: {}, Max ppv: {}: {}, Max npv: {}: {}'.format(
        svm_bi_max_acc_idx, svm_bi_accs[svm_bi_max_acc_idx].acc,
        svm_bi_max_ppv_idx, svm_bi_accs[svm_bi_max_ppv_idx].ppv,
        svm_bi_max_npv_idx, svm_bi_accs[svm_bi_max_npv_idx].npv,
    ))
    svm_bi_pos_max_acc_idx, svm_bi_pos_max_ppv_idx, svm_bi_pos_max_npv_idx = \
        common.max_accuracy(svm_bi_pos_accs)
    print('SVM bi pos: Max acc: {}: {}, Max ppv: {}: {}, Max npv: {}: {}'.format(
        svm_bi_pos_max_acc_idx, svm_bi_pos_accs[svm_bi_pos_max_acc_idx].acc,
        svm_bi_pos_max_ppv_idx, svm_bi_pos_accs[svm_bi_pos_max_ppv_idx].ppv,
        svm_bi_pos_max_npv_idx, svm_bi_pos_accs[svm_bi_pos_max_npv_idx].npv,
    ))

    log_Cs = numpy.log10(Cs)
    common.plot(
        xs=[log_Cs for _ in range(6)],
        ys=[
            [acc.acc for acc in svm_uni_accs],
            [acc.ppv for acc in svm_uni_accs],
            [acc.npv for acc in svm_uni_accs],
            [acc.acc for acc in svm_uni_pos_accs],
            [acc.ppv for acc in svm_uni_pos_accs],
            [acc.npv for acc in svm_uni_pos_accs],
        ],
        colors=['bs-', 'gs-', 'rs-', 'bo-', 'go-', 'ro-'],
        x_label='#C (log10)',
        y_label='accuracy',
        func_labels=[
            'uni_accuracy', 'uni_ppv', 'uni_npv',
            'uni_pos_accuracy', 'uni_pos_ppv', 'uni_pos_npv',
        ],
        title='SVM',
        save=os.path.join(GRAPHS_DIR, 'DisasterClassification', 'svm_uni_features.png'))
    common.plot(
        xs=[log_Cs for _ in range(6)],
        ys=[
            [acc.acc for acc in svm_bi_accs],
            [acc.ppv for acc in svm_bi_accs],
            [acc.npv for acc in svm_bi_accs],
            [acc.acc for acc in svm_bi_pos_accs],
            [acc.ppv for acc in svm_bi_pos_accs],
            [acc.npv for acc in svm_bi_pos_accs],
        ],
        colors=['bs-', 'gs-', 'rs-', 'bo-', 'go-', 'ro-'],
        x_label='#C (log10)',
        y_label='accuracy',
        func_labels=[
            'bi_accuracy', 'bi_ppv', 'bi_npv',
            'bi_pos_accuracy', 'bi_pos_ppv', 'bi_pos_npv',
        ],
        title='SVM',
        save=os.path.join(GRAPHS_DIR, 'DisasterClassification', 'svm_bi_features.png'))

    best_results = [
        [
            round(uni_naive_bayes_accuracy.acc, 3),
            round(bi_naive_bayes_accuracy.acc, 3),
            round(uni_random_forest_accuracies[forest_uni_max_acc_idx].acc, 3),
            round(bi_random_forest_accuracies[forest_bi_max_acc_idx].acc, 3),
            round(svm_uni_accs[svm_uni_max_acc_idx].acc, 3),
            round(svm_uni_pos_accs[svm_uni_pos_max_acc_idx].acc, 3),
            round(svm_bi_accs[svm_bi_max_acc_idx].acc, 3),
            round(svm_bi_pos_accs[svm_bi_pos_max_acc_idx].acc, 3),
        ],
        [
            round(uni_naive_bayes_accuracy.ppv, 3),
            round(bi_naive_bayes_accuracy.ppv, 3),
            round(uni_random_forest_accuracies[forest_uni_max_ppv_idx].ppv, 3),
            round(bi_random_forest_accuracies[forest_bi_max_ppv_idx].ppv, 3),
            round(svm_uni_accs[svm_uni_max_ppv_idx].ppv, 3),
            round(svm_uni_pos_accs[svm_uni_pos_max_ppv_idx].ppv, 3),
            round(svm_bi_accs[svm_bi_max_ppv_idx].ppv, 3),
            round(svm_bi_pos_accs[svm_bi_pos_max_ppv_idx].ppv, 3),
        ],
        [
            round(uni_naive_bayes_accuracy.npv, 3),
            round(bi_naive_bayes_accuracy.npv, 3),
            round(uni_random_forest_accuracies[forest_uni_max_npv_idx].npv, 3),
            round(bi_random_forest_accuracies[forest_bi_max_npv_idx].npv, 3),
            round(svm_uni_accs[svm_uni_max_npv_idx].npv, 3),
            round(svm_uni_pos_accs[svm_uni_pos_max_npv_idx].npv, 3),
            round(svm_bi_accs[svm_bi_max_npv_idx].npv, 3),
            round(svm_bi_pos_accs[svm_bi_pos_max_npv_idx].npv, 3),
        ],
    ]
    common.plot_table(
        title='Best Results',
        cells=best_results,
        column_names=[
            'Uni NB', 'Bi NB', 'Uni RF', 'Bi RF',
            'Uni SVM', 'Uni POS SVM', 'Bi SVM', 'Bi POS SVM',
        ],
        row_names=['accuracy', 'ppv', 'npv'],
        save=os.path.join(GRAPHS_DIR, 'DisasterClassification', 'best_result_table.png'),
    )
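# Added sketch (an assumption, not taken from the original repo): both test
# drivers above rely on common.max_accuracy returning the argmax indices of
# the acc/ppv/npv fields over a list of accuracy records; a minimal helper
# consistent with that usage could look like this.
def max_accuracy(accs):
    max_acc_idx = max(range(len(accs)), key=lambda i: accs[i].acc)
    max_ppv_idx = max(range(len(accs)), key=lambda i: accs[i].ppv)
    max_npv_idx = max(range(len(accs)), key=lambda i: accs[i].npv)
    return max_acc_idx, max_ppv_idx, max_npv_idx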
kwargs = {
    'labelA': inputA_label,
    'labelB': inputB_label,
    'skipGEN': opts.skip_GEN,
}

# pT
plot(
    output_extensions=EXTS,
    legXY=[
        Lef + (1 - Rig - Lef) * 0.75, Bot + (1 - Bot - Top) * 0.65,
        Lef + (1 - Rig - Lef) * 0.95, Bot + (1 - Bot - Top) * 0.95
    ],
    stickers=[label_sample, label_var],
    output=opts.output + '/' + i_sel + '/' + i_met + '_pt',
    templates=get_templates('AB', histograms, i_sel + i_met + '_pt', **kwargs),
    logX=True,
    ratio=True,
    xMin=10,
    divideByBinWidth=True,
    normalizedToUnity=True,
    title=';MET [GeV];Fraction Of Events',
)

# phi
plot(
    output_extensions=EXTS,
    legXY=[
        Lef + (1 - Rig - Lef) * 0.75, Bot + (1 - Bot - Top) * 0.05,
        Lef + (1 - Rig - Lef) * 0.95, Bot + (1 - Bot - Top) * 0.35
        [common.fastLatLonImg(ee.Image(l_NDVI.get(NDVI)), area) for NDVI in sorted_pairs])
    LatLonImgsSAR = ee.List([LatLonImgVHVV(ee.Image(l_SAR.get(SAR)), area)
                             for SAR in range(SAR_size)])
    both_lists = ee.List([LatLonImgsNDVI, LatLonImgsSAR]).getInfo()
    LatLonImgsNDVI = both_lists[0]
    LatLonImgsSAR = both_lists[1]

    for NDVI in range(len(LatLonImgsNDVI)):
        i = sorted_pairs[NDVI]
        for SAR in pairs_i[i]:
            ndvi_temp = (LatLonImgsNDVI[NDVI][0], LatLonImgsNDVI[NDVI][1],
                         LatLonImgsNDVI[NDVI][2])
            if SAR not in precomputed_SAR:
                lats = LatLonImgsSAR[SAR][0]
                lons = LatLonImgsSAR[SAR][1]
                vh = LatLonImgsSAR[SAR][2]
                vv = LatLonImgsSAR[SAR][3]
                precomputed_SAR[SAR] = []
                precomputed_SAR[SAR].append((lats, lons, vh) + (f'SAR (VH) {l_SAR_dates[SAR]:%B %d, %Y}',))
                precomputed_SAR[SAR].append((lats, lons, vv) + (f'SAR (VV) {l_SAR_dates[SAR]:%B %d, %Y}',))
            arr.append(ndvi_temp + (f'NDVI {l_NDVI_dates[NDVI]:%B %d, %Y}',))
            arr.extend(precomputed_SAR[SAR])
    return rasteriser.rasteriseImages(arr)


if __name__ == "__main__":
    import reader
    for array in reader.parseKML("2017_polygons.kml"):
        p = arrayToPairs(array, "2017-05-01", "2017-09-30")
        common.plot(p[0])
import numpy as np
import kmeans
import common
import naive_em
import em

X = np.loadtxt("toy_data.txt")

for i in range(4):
    for j in range(5):
        initial_mixture, post = common.init(X, i + 1, j)
        #M, L, cost_final = kmeans.run(X, initial_mixture, post)
        #title = "K means for K "+str(i+1)+" seed " +str(j)
        #common.plot(X, M, L, title)
        #print("For K "+ str(i+1) + " seed " + str(j) +" cost is " + str(cost_final))
        M, L, likelihood = naive_em.run(X, initial_mixture, post)
        bic = common.bic(X, M, likelihood)
        title = "EM for K " + str(i + 1) + " seed " + str(j)
        common.plot(X, M, L, title)
        print("For K " + str(i + 1) + " seed " + str(j) + " likelihood is "
              + str(likelihood) + " bic is " + str(bic))
                                     X_test, y_test, opts.n_folds, not opts.regression):
    clf = fit_kmp(X_tr, y_tr, X_te, y_te, opts, random_state=0)
    clfs.append(clf)

vs = np.vstack([clf.validation_scores_ for clf in clfs])
ts = np.vstack([clf.training_scores_ for clf in clfs])

pl.figure()
error_bar = len(args) == 2
plot(pl, clf.iterations_, vs.mean(axis=0), vs.std(axis=0), "Test set", error_bar)
plot(pl, clf.iterations_, ts.mean(axis=0), ts.std(axis=0), "Train set", error_bar)
pl.xlabel('Iteration')
if opts.regression:
    pl.ylabel('MSE')
    pl.legend(loc='upper right')
else:
    pl.ylabel('Accuracy')
    pl.legend(loc='lower right')
        clf_ks.append(fit_kmp(X_tr, y_tr, X_te, y_te, components, opt_dict,
                              opts.regression, random_state=j))
    j += 1

ss = np.vstack([clf.validation_scores_ for clf in clf_s])
kgs = np.vstack([clf.validation_scores_ for clf in clf_kg])
if not opts.regression:
    kbs = np.vstack([clf.validation_scores_ for clf in clf_kb])
    kss = np.vstack([clf.validation_scores_ for clf in clf_ks])

pl.figure()
plot(pl, clf_s[0].iterations_, ss.mean(axis=0), ss.std(axis=0),
     "Selected", opts.bars)
plot(pl, clf_kg[0].iterations_, kgs.mean(axis=0), kgs.std(axis=0),
     "K-means global", opts.bars)
if not opts.regression:
    plot(pl, clf_kb[0].iterations_, kbs.mean(axis=0), kbs.std(axis=0),
         "K-means balanced", opts.bars)
    plot(pl, clf_ks[0].iterations_, kss.mean(axis=0), kss.std(axis=0),
         "K-means stratified", opts.bars)
pl.xlabel('Iteration')
seeds = [0, 1, 2, 3, 4]

BICs = np.empty(len(Ks))
for i, K in enumerate(Ks):
    k_best_mix, k_best_post, k_best_cost = None, None, np.inf
    em_best_mix, em_best_post, em_best_ll = None, None, -np.inf
    for seed in seeds:
        init_mix, init_post = common.init(X, K, seed)
        k_mix, k_post, k_cost = kmeans.run(X, init_mix, init_post)
        em_mix, em_post, em_ll = naive_em.run(X, init_mix, init_post)
        if k_cost < k_best_cost:
            k_best_mix, k_best_post, k_best_cost = k_mix, k_post, k_cost
        if em_ll > em_best_ll:
            em_best_mix, em_best_post, em_best_ll = em_mix, em_post, em_ll
    BICs[i] = common.bic(X, em_best_mix, em_best_ll)
    common.plot(X, k_best_mix, k_best_post, "K-means K={}".format(K))
    common.plot(X, em_best_mix, em_best_post, "EM K={}".format(K))

print("BICs: ", BICs)
print("Best BIC: ", np.max(BICs))
print("Best K: ", Ks[np.argmax(BICs)])

X = np.loadtxt("netflix_incomplete.txt")
K = 12
seeds = [0, 1, 2, 3, 4]
em_best_mix, em_best_post, em_best_ll = None, None, -np.inf
for seed in seeds:
    init_mix, init_post = common.init(X, K, seed)
    em_mix, em_post, em_ll = em.run(X, init_mix, init_post)
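# Added sketch (an assumption about the convention behind common.bic, not
# code from the course scaffold): the score maximized above is typically
#     BIC = log-likelihood - (p / 2) * log(n),
# where p counts the free parameters of a K-component spherical Gaussian
# mixture in d dimensions (K*d means, K variances, K-1 mixing weights), so
# larger is better and the best K is the argmax.
def bic_sketch(X, K, log_likelihood):
    n, d = X.shape
    p = K * d + K + (K - 1)  # means, spherical variances, mixing weights
    return log_likelihood - 0.5 * p * np.log(n)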
def evolutionary_strategies(args):
    minmax = common.get_minmax(args.fitness)
    N = args.N
    gens = args.gens
    population_exchange = args.exchange
    exchange_individuals = args.iexchange
    islands = args.islands

    population_list = []
    fittest_list = []
    best_fitness_list = []
    for i in range(0, islands):
        population = common.initialize(N, minmax)
        fittest, best_fitness = common.fittest(population[0], args.fitness)
        population_list.append(population)
        fittest_list.append(fittest)
        best_fitness_list.append(best_fitness)

    p_list = [0] * islands
    successful_cases_list = [0] * islands
    for gen in range(1, gens):
        if gen % (gens // 10) == 0:
            print("Generation :#%d" % gen)
        if gen % population_exchange == 0:
            print(" -> Exchange in Generation: %d " % gen)
            population_list = common.exchange(population_list, exchange_individuals)
        mutated = [[]] * islands
        local_fittest = [0] * islands
        fitness = [0] * islands
        for i in range(0, islands):
            mutated[i] = mutation.es_mutation(population_list[i], minmax, p_list[i])
            local_fittest[i], fitness[i] = common.fittest(mutated[i][0], args.fitness)
            if fitness[i] >= best_fitness_list[i]:
                fittest_list[i] = local_fittest[i]
                best_fitness_list[i] = fitness[i]
                successful_cases_list[i] += 1
                population_list[i] = mutated[i]  # keep the improved island population
            common.write_data(gen, fitness[i], 'es%d.dat' % i)
            p_list[i] = successful_cases_list[i] / gen  # success rate drives step size

    print("#########################")
    print("# Strategy : Evolutionary Strategies")
    print("# Generations : " + str(gens))
    fittest = 0
    best_fitness = 0
    best_island = 0
    for i in range(0, islands):
        if best_fitness_list[i] > best_fitness:
            fittest = fittest_list[i]
            best_fitness = best_fitness_list[i]
            best_island = i
    print("# Best Solution Value : %.3f" % fittest)
    print("# Best Solution Fitness : %g" % best_fitness)
    print("# Obtained from Island : %d" % best_island)
    print("# Log File : ./es%d.dat" % best_island)
    print("# Graph : Evolutionary_Strategies_%s.png" % args.fitness.upper())
    print("#########################")
    common.plot('Evolutionary Strategies %s' % args.fitness.upper(),
                'es%d.dat' % best_island)
    files = []
    for island in range(0, islands):
        common.plot('Evolutionary Strategies %s_islands' % args.fitness.upper(),
                    'es%d.dat' % island, False)
import numpy as np
import kmeans
import common
import naive_em
import em

X = np.loadtxt("toy_data.txt")

# TODO: Your code here
for i in range(1, 5):
    costs = []
    for j in range(5):
        mixture, post = common.init(X, i, j)
        _, _, cost = kmeans.run(X, mixture, post)
        costs.append(cost)
    common.plot(X, mixture, post, 'test')
    print(min(costs))
def do_inference(context, bindings, inputs, outputs, stream):
    # Copy inputs to the device, run inference, then copy outputs back.
    [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
    context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)
    [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
    stream.synchronize()
    return [out.host for out in outputs]


it, times = [], []
total_time = 0
for i in range(ITERATION):
    t1 = time.time()
    trt_outputs = do_inference(context=context, bindings=bindings,
                               inputs=inputs, outputs=outputs, stream=stream)
    t2 = time.time()
    total_time += t2 - t1
    it.append(i)
    times.append(t2 - t1)

plot(it, times, 'tensorrt.png',
     'TensorRT {} inference avg: {:.4f}'.format(IMAGE_SIZE, total_time / ITERATION),
     'Iteration', 'time', ['tensorrt'])
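# Added sketch (assumed setup, not from the original script): do_inference
# above expects host/device buffer pairs plus a CUDA stream. The usual
# pycuda pattern for building them from a deserialized engine, using the
# pre-8.5 TensorRT binding API that matches execute_async_v2, looks like this.
import pycuda.driver as cuda
import tensorrt as trt

class HostDeviceMem:
    def __init__(self, host, device):
        self.host = host      # page-locked numpy array
        self.device = device  # device allocation

def allocate_buffers(engine):
    inputs, outputs, bindings = [], [], []
    stream = cuda.Stream()
    for binding in engine:
        size = trt.volume(engine.get_binding_shape(binding))
        dtype = trt.nptype(engine.get_binding_dtype(binding))
        host_mem = cuda.pagelocked_empty(size, dtype)
        dev_mem = cuda.mem_alloc(host_mem.nbytes)
        bindings.append(int(dev_mem))
        (inputs if engine.binding_is_input(binding) else outputs).append(
            HostDeviceMem(host_mem, dev_mem))
    return inputs, outputs, bindings, stream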
import em
from scipy.stats import multivariate_normal

X = np.loadtxt("toy_data.txt")

Ks = [1, 2, 3, 4]
seeds = [0, 1, 2, 3, 4]

# =============================================================================
# 2. K-means
# =============================================================================
for K in Ks:
    for seed in seeds:
        mixture, post = common.init(X, K, seed=seed)        # Initialize K-means
        mixture, post, cost = kmeans.run(X, mixture, post)  # K-means
        common.plot(X, mixture, post, [K, seed])            # Plot initialization
        print(cost)

# =============================================================================
# 3. Expectation–maximization algorithm
# =============================================================================
def test_2dgaussian_pdf(X, mu, var):
    y1 = naive_em.pdf_2dgaussian(X, mu, var)
    y2 = multivariate_normal.pdf(X, mean=mu.reshape(2,), cov=var[0])
    return all(abs(y1 - y2) < 1e-6)  # elementwise comparison, then reduce

# 2dgaussian
mixture, post = common.init(X, 1)
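# Added sketch (an assumption about naive_em.pdf_2dgaussian, matched to the
# test above, which compares it against a spherical covariance var * I): a
# spherical Gaussian density with scalar variance var, evaluated row-wise.
def pdf_2dgaussian_sketch(X, mu, var):
    d = X.shape[1]
    sq_dist = np.sum((X - mu.reshape(1, d)) ** 2, axis=1)
    return np.exp(-sq_dist / (2.0 * var)) / ((2.0 * np.pi * var) ** (d / 2.0))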
#!/bin/env python3
import common

# Function 2 and 3
f2 = open('f2.dat', 'w')
f3 = open('f3.dat', 'w')
for x in range(0, 20000):
    fitness_f2 = common.fitness(x * 0.001, 'f2')
    fitness_f3 = common.fitness(x * 0.001, 'f3')
    f2.write("%d, %.3f\n" % (x, fitness_f2))
    f3.write("%d, %.3f\n" % (x, fitness_f3))
f2.close()
f3.close()
common.plot('F2', 'f2.dat')
common.plot('F3', 'f3.dat')

# Function 5
f5 = open('f5.dat', 'w')
for x in range(0, 1000):
    fitness_f5 = common.fitness(x * 0.001, 'f5')
    f5.write("%d, %f \n" % (x, fitness_f5))
f5.close()
common.plot('F5', 'f5.dat')
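# Added sketch (an assumption about this repo's common.plot helper, inferred
# from the calls above and from log lines like
# "Genetic_Algorithm_Ackleys_Function.png"): read "x, value" pairs from the
# .dat file and save a PNG named after the title with spaces replaced by
# underscores and colons dropped.
import matplotlib.pyplot as plt

def plot_sketch(title, datfile):
    xs, ys = [], []
    with open(datfile) as f:
        for line in f:
            x, y = line.split(',')
            xs.append(float(x))
            ys.append(float(y))
    plt.figure()
    plt.plot(xs, ys)
    plt.title(title)
    plt.xlabel('x')
    plt.ylabel('fitness')
    plt.savefig(title.replace(':', '').replace(' ', '_') + '.png')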
        # Run Naive EM
        mixtures_EM[i], posts_EM[i], costs_EM[i] = \
            naive_em.run(X, *common.init(X, K[k], seeds[i]))

    # Print lowest cost
    print("=============== Clusters:", k + 1, "======================")
    print("Lowest cost using kMeans is:", np.min(costs_kMeans))
    print("Highest log likelihood using EM is:", np.max(costs_EM))

    # Save best seed for plotting
    best_seed_kMeans[k] = np.argmin(costs_kMeans)
    best_seed_EM[k] = np.argmax(costs_EM)

    # Plot kMeans and EM results
    common.plot(X, mixtures_kMeans[best_seed_kMeans[k]],
                posts_kMeans[best_seed_kMeans[k]], title="kMeans")
    common.plot(X, mixtures_EM[best_seed_EM[k]],
                posts_EM[best_seed_EM[k]], title="EM")

    # BIC score for EM
    bic[k] = common.bic(X, mixtures_EM[best_seed_EM[k]], np.max(costs_EM))

# Print the best K based on BIC
print("================= BIC ====================")
print("Best K is:", np.argmax(bic) + 1)
print("BIC for the best K is:", np.max(bic))
    if len(_hists) < 2:
        continue

    ## labels and axes titles
    _titleX, _titleY, _objLabel = _hkey_basename, 'Entries', ''
    label_obj = get_text(Lef+(1-Rig-Lef)*0.95, Bot+(1-Top-Bot)*0.925, 31, .035, _objLabel)
    _labels = [label_sample, label_obj]
    if _divideByBinWidth:
        _titleY += ' / Bin width'
    _htitle = ';'+_titleX+';'+_titleY

    ## plot
    plot(**{
        'histograms': _hists,
        'title': _htitle,
        'labels': _labels,
        'legXY': [Lef+(1-Rig-Lef)*0.55, Bot+(1-Bot-Top)*0.70,
                  Lef+(1-Rig-Lef)*0.95, Bot+(1-Bot-Top)*0.90],
        'outputs': [OUTDIR+'/'+_outname+'.'+_tmp for _tmp in EXTS],
        'ratio': True,
        'logY': _logY,
        'xMin': _xMin,
        'xMax': _xMax,
        'autoRangeX': True,
    })

    del _hists
def plot(self, img_generator, fig_id=None):
    samples = img_generator(16)
    fig = plot(samples, fig_id, shape=self.train.images[0].shape)
    return fig
X_pred = em.fill_matrix(X, mixture)
print(common.rmse(X_gold, X_pred))
print(mixture)
#print(em.fill_matrix(X_test

### get the best seed and the best k size that minimizes the cost
## Best seed
# Get the lowest cost
#optimal_seed_cost = em_total_likelihood_dict[0]
#for k, v in em_total_likelihood_dict.items():
#    if v > optimal_seed_cost:
#        optimal_seed_cost = v
#    else:
#        optimal_seed_cost = optimal_seed_cost

# Get the seed associated with the lowest cost
#for k, v in em_total_likelihood_dict.items():
#    if v == optimal_seed_cost:
#        optimal_seed = k
#print(em_k_dict)

### Test case for exam
mixture = common.GaussianMixture(np.array([[1, 1], [1, 1]]),
                                 np.array([0.5, 0.5]),
                                 np.array([0.01, 0.99]))
post = np.ones((X_experiment.shape[0], 2)) / 2
mixture, post, loglike = em.run(X_experiment, mixture, post)
common.plot(X_experiment, mixture, post, "Test case")
print(post)
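# Added sketch (assumption about common.rmse, consistent with its use above):
# root-mean-squared error between the gold matrix and the completed one.
def rmse_sketch(X_gold, X_pred):
    return np.sqrt(np.mean((X_gold - X_pred) ** 2))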
for k in K:
    seeds = np.array([0, 1, 2, 3, 4])
    #k_cost = np.zeros((seeds.shape[0], 2))
    min_cost_seed_i = 0
    mixtures, posts, costs = [], [], []
    for seed_i in range(seeds.shape[0]):
        mixture, post = common.init(X, k, seeds[seed_i])
        mixture, post, cost = kmeans.run(X, mixture, post)
        mixtures.append(mixture)
        posts.append(post)
        costs.append(cost)
        if cost < costs[min_cost_seed_i]:  # track the lowest-cost seed so far
            min_cost_seed_i = seed_i
    common.plot(X, mixtures[min_cost_seed_i], posts[min_cost_seed_i],
                "k-mean k:" + str(k) + " seed:" + str(min_cost_seed_i))
    print(k, costs[min_cost_seed_i], min_cost_seed_i)

for k in K:
    seeds = np.array([0, 1, 2, 3, 4])
    #k_cost = np.zeros((seeds.shape[0], 2))
    min_cost_seed_i = 0
    mixtures, posts, costs = [], [], []
    for seed_i in range(seeds.shape[0]):
        mixture, post = common.init(X, k, seeds[seed_i])
        mixture, post, cost = naive_em.run(X, mixture, post)
        mixtures.append(mixture)
        posts.append(post)
        costs.append(cost)
        if cost > costs[min_cost_seed_i]:  # EM: higher log-likelihood is better
            min_cost_seed_i = seed_i
        continue

    ## labels and axes titles
    _titleX, _titleY, _objLabel = _hkey_basename, 'Entries', ''
    label_obj = get_text(Lef + (1 - Rig - Lef) * 0.95,
                         Bot + (1 - Top - Bot) * 0.925, 31, .035, _objLabel)
    _labels = [label_sample, label_obj]
    if _divideByBinWidth:
        _titleY += ' / Bin width'
    _htitle = ';' + _titleX + ';' + _titleY

    ## plot
    plot(**{
        'histograms': _hists,
        'title': _htitle,
        'labels': _labels,
        'legXY': _legXY,
        'outputs': [OUTDIR + '/' + _outname + '.' + _tmp for _tmp in EXTS],
        'ratio': True,
        'logY': _logY,
        'autoRangeX': True,
    })

    del _hists
import numpy as np
import em
import common

X = np.loadtxt("test_incomplete.txt")
X_gold = np.loadtxt("test_complete.txt")
X = X_gold

K = 4
n, d = X.shape
seed = 0

# TODO: Your code here
mixture, post = common.init(X, K, seed)
mixture, post, l = em.run(X, mixture, post)
bic = common.bic(X, mixture, l)
print("bic = ", bic)
# title = "Incomplete - > K = {}, seed = {}, log likelyhood = {}, bic = {} plot.png".format(K, seed, int(l), int(bic))
title = "test log plot"
common.plot(X, mixture, post, title)