import scipy.stats as sp  # assumed alias: sp.spearmanr implies sp = scipy.stats
import matplotlib.pyplot as plt

# LABELS (axis-label lookup) and currentFig (running figure counter) are
# module-level globals defined elsewhere in this script.

def plot(x_param, x_label, y_param, y_label, linear=False):
    global currentFig
    spear = sp.spearmanr(x_param, y_param, nan_policy='omit')
    print("Figure %2.1d %13s vs %-13s Spearman: %8.3g pvalue: %8.2g" %
          (currentFig, y_label, x_label, spear[0], spear[1]))
    fig = plt.figure(currentFig)  # the current figure
    currentFig += 1
    plt.clf()  # clear the figure before each run
    ax = fig.add_subplot(111)  # set axes, figure location
    if linear:
        ax.plot(x_param, y_param, 'o')
    else:
        ax.loglog(x_param, y_param, 'o')
    ax.set_xlabel(LABELS[x_label], fontsize=15)
    ax.set_ylabel(LABELS[y_label], fontsize=15)
    plt.tight_layout()
    plt.show()
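# A minimal usage sketch with hypothetical data (the LABELS keys/values and
# the arrays below are made up for illustration; the real globals live
# elsewhere in this script):
import numpy as np

LABELS = {'mass': 'Stellar mass', 'sfr': 'Star formation rate'}
currentFig = 1
xs = np.random.lognormal(mean=1.0, sigma=0.5, size=100)
ys = xs ** 0.8 * np.random.lognormal(sigma=0.3, size=100)
plot(xs, 'mass', ys, 'sfr')               # log-log by default
plot(xs, 'mass', ys, 'sfr', linear=True)  # linear axes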
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--dimension', type=int, default=DIMENSION_DEFAULT)
    parser.add_argument('-N', '--num-samples', type=int, nargs='+',
                        default=[60, 70, 80, 100, 150, 250, 500, 850])
    parser.add_argument('-M', '--average-iterations', type=int,
                        default=AVERAGE_ITERATIONS_DEFAULT)
    parser.add_argument('-b', '--beta', type=float, nargs='+',
                        default=[1.0, 0.5, 0.2])
    args = parser.parse_args()

    p = args.dimension
    Ns = args.num_samples
    M = args.average_iterations
    T = 200
    TYLER_MAX_ITERS = 1000
    TYLER_NEWTON_STEPS = 750
    GAUSSIAN_NEWTON_STEPS = 750

    for beta in args.beta:
        dataset = SyntheticDataset(p=p, Ns=Ns, M=M, beta=beta)
        metric = JointEstimationDistanceErrorMetric(T=T, dataset=dataset)
        estimator_objects = [
            # estimators.joint.general_loss.MMNewtonJointEstimator(
            #     loss=losses.tyler(dataset.get_dimension()), tolerance=1e-6,
            #     max_iters=TYLER_MAX_ITERS, newton_num_steps=TYLER_NEWTON_STEPS,
            #     newton_tol=1e-6, name='Tyler'),
            # estimators.joint.gauss_loss.NewtonJointEstimator(
            #     newton_num_steps=GAUSSIAN_NEWTON_STEPS, newton_tol=1e-6,
            #     name='GMRF Newton'),
            # estimators.joint.general_loss.MMJointEstimator(
            #     estimators.joint.gauss_loss.InvestJointEstimator(),
            #     loss=losses.tyler(dataset.get_dimension()),
            #     tolerance=1e-6, max_iters=TYLER_MAX_ITERS, name='Tyler'),
            # estimators.joint.gauss_loss.InvestJointEstimator(name='GMRF'),
            estimators.joint.general_loss.MMNewtonJointEstimator(
                loss=losses.generalized_gaussian(beta, 1), tolerance=1e-6,
                max_iters=TYLER_MAX_ITERS, newton_num_steps=TYLER_NEWTON_STEPS,
                newton_tol=1e-6, name='GG'),
            estimators.joint.gauss_loss.SampleCovarianceJointEstimator(
                name='Sample covariance'),
        ]
        plots.plot_variables_vs_N(estimator_objects, Ns, M, metric, show=False)
    plots.show()
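# Note on the flags above: with nargs='+', type is applied per item, and a
# list default is used as-is when the flag is absent (argparse only runs
# type over string defaults). A standalone sketch of both cases, mirroring
# the --beta flag:
import argparse

demo = argparse.ArgumentParser()
demo.add_argument('-b', '--beta', type=float, nargs='+', default=[1.0, 0.5, 0.2])
print(demo.parse_args([]).beta)                    # [1.0, 0.5, 0.2]
print(demo.parse_args(['-b', '0.3', '0.7']).beta)  # [0.3, 0.7]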
def plots(self):
    # traded_volume_over_time(self.num_steps, self.agent_measurements)
    plots.soc_over_time(self.num_steps, self.soc_list_over_time)
    plots.households_deficit_overflow(self.num_steps, self.deficit_over_time,
                                      self.overflow_over_time)
    plots.clearing_over_utility_price(
        self.num_steps, self.utility_price,
        self.clearing_price_min_avg_max, self.clearing_quantity)
    plots.clearing_quantity(self.num_steps, self.clearing_quantity)
    plots.clearing_quantity_over_demand(self.num_steps, self.clearing_quantity,
                                        self.household_demand)
    # If an electrolyzer is present, plot its behaviour.
    if 'Electrolyzer' in self.model.agents:
        plots.electrolyzer(self.num_steps, self.model.agents['Electrolyzer'])
    plots.show()
def main():
    # map() returns a one-shot iterator in Python 3; materialize it so the
    # data can be traversed more than once.
    datas = list(map(read_data_json, sys.argv[1:]))
    # draw_all(datas, 'reactions', [
    #     'forward methyl to high bridge',
    #     'reverse methyl to high bridge',
    # ])
    draw_all(datas, 'species', [
        # re.compile(r'^(?!dimer).*$'),
        'high bridge',
        'methyl on dimer',
    ])
    show()
def runTest(xTrain, yTrain, xTest, yTest, arguments, label="NA", index=None):
    (trainX, trainY) = loadData(xTrain, yTrain)
    (testX, testY) = loadData(xTest, yTest)

    # required scaling for the RBM
    trainX = scale(trainX)
    testX = scale(testX)

    # save the test concentration series; testY becomes class labels
    testC = testY
    if arguments["multiClass"] == 1:
        trainY = convertToClasses(trainY)
        testY = convertToClasses(testY)
    else:
        trainY = np.transpose(trainY)
        testY = np.transpose(testY)

    logitPred = doLogisticRegression(trainX, trainY, testX, testY,
                                     arguments["optimize"], arguments["pickle"])
    rbmPred = doRBM(trainX, trainY, testX, testY,
                    arguments["optimize"], arguments["pickle"])

    # write the results of the training / optimization phase
    if arguments["optimize"] in ("new", "gs"):
        if not os.path.isfile(arguments["label"] + 'train_result.csv'):
            mode = 'w'
            writeH = True
        else:
            mode = 'a'
            writeH = False
        with open(arguments["label"] + 'train_result.csv', mode) as csvfile:
            header = ['index', 'model_fit', 'rbm_accuracy', 'logit_accuracy']
            label = index if index is not None else np.mean(testC[:, 0])
            writer = csv.DictWriter(csvfile, fieldnames=header)
            if writeH:
                writer.writeheader()
            writer.writerow({'index': label,
                             'model_fit': arguments["optimize"],
                             'rbm_accuracy': accuracy_score(testY, rbmPred),
                             'logit_accuracy': accuracy_score(testY, logitPred)})
        print([label, accuracy_score(testY, rbmPred),
               accuracy_score(testY, logitPred)])

    if arguments["verbose"] == 1:
        print("LOGISTIC REGRESSION PERFORMANCE")
        print(classification_report(testY, logitPred))
        print("Accuracy Score: %s\n" % accuracy_score(testY, logitPred))
        print("RBM PERFORMANCE")
        print(classification_report(testY, rbmPred))
        print("Accuracy Score: %s\n" % accuracy_score(testY, rbmPred))

    if arguments["visualize"] == 1:
        plot.accuracy(testY, logitPred, "Logistic Regression", c=testC)
        plot.accuracy(testY, rbmPred, "RBM", c=testC)
        plot.show()

    if arguments["predOut"] == 1:
        np.savetxt("logitPred.csv", logitPred, delimiter=",")
        np.savetxt("rbmPred.csv", rbmPred, delimiter=",")

    # label falls back to the mean test concentration when no index is given
    if index is None:
        label = np.mean(testC[:, 0])
    else:
        label = index
    # tag each prediction set with the label computed above
    predictions = [(label, 'logistic', logitPred), (label, 'rbm', rbmPred)]
    rets = []
    for l, c, p in predictions:
        ret = {'label': l,
               'clf': c,
               'accuracy_score': accuracy_score(testY, p),
               'precision_score': precision_score(testY, p),
               'recall_score': recall_score(testY, p),
               'f1_score': f1_score(testY, p)}
        rets.append(ret)
    return rets
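# Aside: when multiClass == 1 the labels are multi-class, and sklearn's
# precision_score / recall_score / f1_score then require an explicit
# `average` argument (the default, average='binary', raises a ValueError on
# multi-class input). A small helper sketch, assuming weighted averaging is
# acceptable here:
def scoreRow(yTrue, yPred, avg='weighted'):
    return {'accuracy_score': accuracy_score(yTrue, yPred),
            'precision_score': precision_score(yTrue, yPred, average=avg),
            'recall_score': recall_score(yTrue, yPred, average=avg),
            'f1_score': f1_score(yTrue, yPred, average=avg)}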
import message
import mixer
import channel
import receiver
from plots import plot_before_after, plot_before_after_amqam, show

# message
orig1 = message.sin()
orig2 = message.square()
m1 = message.sin()
m2 = message.square()

# mixer
m = mixer.AMQAM(m1, m2)

# channel
m = channel.attenuation(m)
m = channel.fading(m)
m = channel.gauss_noise(m)

# receiver
m = receiver.unattenuate(m)
m1, m2 = receiver.demuxAMQAM(m)
m1 = receiver.lpf(m1)
m2 = receiver.lpf(m2)

plot_before_after_amqam(orig1, orig2, m1, m2)
show()
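# For reference, the mux/demux idea behind AMQAM in one self-contained
# numpy sketch (independent of the mixer/receiver modules above): the two
# messages ride on orthogonal carriers, and multiplying by each carrier
# followed by low-pass filtering recovers them. The moving average is a
# crude stand-in for receiver.lpf.
import numpy as np

fs, fc = 10_000, 1_000                       # sample rate, carrier (Hz)
t = np.arange(0, 0.2, 1 / fs)
a = np.sin(2 * np.pi * 20 * t)               # message 1 (sine)
b = np.sign(np.sin(2 * np.pi * 20 * t))      # message 2 (square)
s = a * np.cos(2 * np.pi * fc * t) + b * np.sin(2 * np.pi * fc * t)

def lpf_ma(x, n=100):
    # moving-average low-pass: first null at fs/n = 100 Hz
    return np.convolve(x, np.ones(n) / n, mode='same')

a_hat = 2 * lpf_ma(s * np.cos(2 * np.pi * fc * t))  # ~ message 1
b_hat = 2 * lpf_ma(s * np.sin(2 * np.pi * fc * t))  # ~ message 2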
end = args["end"] start = args["start"] data = np.absolute(data) ctx_pred = data[start:end, 1] f_pred = data[start:end, 0] target = data[start:end, 2] ctx_accuracy = (target == ctx_pred) f_accuracy = (target == f_pred) if (not args["concentration"] == "none"): c = np.genfromtxt(args["concentration"], delimiter=",", dtype="float32") else: c = np.zeros((0, 0)) print("PREDICTION FROM CORTEX") print classification_report(target, ctx_pred) print("Accuracy Score: %s\n" % accuracy_score(target, ctx_pred)) print("PREDICTION FROM FIBERS") print classification_report(target, f_pred) print("Accuracy Score: %s\n" % accuracy_score(target, f_pred)) if (args["visualize"] == 1): plot.accuracy(target, ctx_pred, label="Cortex", c=c) plot.accuracy(target, f_pred, label="Fibers", c=c) plot.show()
# prediction for the generation of the confusion matrix
y_pred_dumm = pd.DataFrame(
    model.predict(X_test_reshaped, batch_size=100),
    columns=y_test_dumm.columns,
)
y_pred = y_pred_dumm.idxmax(axis="columns").astype("category")

precision, recall, fscore, support = precision_recall_fscore_support(
    y_test, y_pred)

# NB: this join keys on the per-class support count, so it silently
# mismatches if two classes happen to have the same support.
val = (
    y_test.value_counts().rename("support").to_frame().reset_index().merge(
        pd.DataFrame({
            "precision": precision,
            "recall": recall,
            "fscore": fscore,
            "support": support,
        }),
        on="support",
    ).set_index("index"))
print(val)

# confusion matrix
plots.confusion_matrix_tf(y_test, y_pred)
plots.show()

# print the architecture
# from tensorflow.keras.utils import plot_model
# plot_model(model, to_file="model.png", show_shapes=True, show_layer_names=True)
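# A hedged alternative that sidesteps the support-count join:
# precision_recall_fscore_support returns its rows in the order of the
# labels argument, so the per-class table can be indexed directly
# (sketch, assuming y_test is a pandas Series of class labels):
labels = sorted(y_test.unique())
precision, recall, fscore, support = precision_recall_fscore_support(
    y_test, y_pred, labels=labels)
val = pd.DataFrame(
    {"precision": precision, "recall": recall,
     "fscore": fscore, "support": support},
    index=labels)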
start = args["start"] data = np.absolute(data) ctx_pred = data[start:end,1] f_pred = data[start:end,0] target = data[start:end,2] ctx_accuracy = (target == ctx_pred) f_accuracy = (target == f_pred) if (not args["concentration"] == "none"): c = np.genfromtxt(args["concentration"], delimiter=",", dtype="float32") else: c = np.zeros((0,0)) print("PREDICTION FROM CORTEX") print classification_report(target, ctx_pred) print("Accuracy Score: %s\n" % accuracy_score(target, ctx_pred)) print("PREDICTION FROM FIBERS") print classification_report(target, f_pred) print("Accuracy Score: %s\n" % accuracy_score(target, f_pred)) if (args["visualize"] == 1): plot.accuracy(target, ctx_pred, label="Cortex", c=c) plot.accuracy(target, f_pred, label="Fibers", c=c) plot.show()
bgcounts = np.recfromtxt(args.bgfile, comments="#", delimiter="\t",
                         dtype=int, names=True)
counts = np.recfromtxt(args.file, comments="#", delimiter="\t",
                       names=True, dtype=int)

dp = DataPrep(counts, bgcounts, crop=args.crop, fill=args.fill,
              normalize=False, down_sampling=args.dsfac)
dp.gap_stats()

if args.lomb_scargle:
    # calculate the Lomb-Scargle periodogram in the
    # frequency range of interest
    freq_ls = np.linspace(5.0e-7, 5.0e-4, 500)
    ls = LombScargle(np.asarray(dp.position, dtype=float),
                     np.asarray(dp.counts, dtype=float), freq_ls)
    f, pgram = ls()
    ax = plots.samples(f, pgram, window_title="Lomb-Scargle Periodogram")
    ax.semilogy()

if args.welch:
    plots.samples(dp.position, dp.counts, window_title="Raw data")
    # xlim = (0.0, 5e-5)
    fs = 1.0 / np.diff(dp.position).min()
    plots.psd(
        dp.counts,
        xlim=args.frange,
        Fs=fs,
        NFFT=args.nfft,
        window=mlab.window_none,
        window_title="Welch Periodogram",
    )

plots.samples(dp.position, dp.counts, window_title="Raw data")
plots.show()
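# LombScargle above is a project-local class (constructed, then called).
# For comparison, a self-contained sketch of the same computation with
# scipy; note that scipy.signal.lombscargle expects *angular* frequencies:
import numpy as np
from scipy.signal import lombscargle

x = np.sort(np.random.uniform(0, 1e6, 300))   # irregularly sampled positions
y = np.sin(2 * np.pi * 5e-5 * x) + np.random.normal(0, 0.3, x.size)
freqs = np.linspace(5.0e-7, 5.0e-4, 500)      # cycles per unit length
pgram = lombscargle(x, y, 2 * np.pi * freqs)  # convert to rad per unit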