def __init__(self, width, height, fps, parent=None):
    super(MainWindow, self).__init__(parent)
    self.map = Bootstrap.ImageWidget(width, height, fps)
    hbox = QtGui.QHBoxLayout()
    hbox.addWidget(self.map)
    self.setLayout(hbox)
def configureLogging(moduleName):
    loggerName = moduleName
    FORMAT = "%(levelname)s %(asctime)-15s %(message)s"
    if not logging.getLogger(loggerName).disabled:
        loglvl = DebugFlags.getLogLevel()
        logging.basicConfig(format=FORMAT, level=loglvl,
                            filename=os.path.join(Bootstrap.getFlaskLogDir(), moduleName + ".log"))
def test_single_variate_single_dimension(self):
    # this is a single variable expressed as a 1-d numpy array
    # (each element of the array is an instance)
    X = np.array([1, 2, 3, 4, 5])
    B = 2
    s = np.mean
    bootstrap = Bootstrap.Bootstrap(X, s, B)
    bootstrap.run()
    assert bootstrap.N == 5
    assert bootstrap.B == B
def test_07a(self):
    X = my_data.get_data()
    #print(X)
    s = self.ratio_first_eigenvector_to_sum
    B = 200
    #B = 10

    # explore the empirical data...
    covariance_matrix = np.cov(X, bias=True, rowvar=False)
    w, v = LA.eig(covariance_matrix)
    v = np.transpose(v)
    print("--- empirical data - shape ---")
    print(X.shape)
    print("--- empirical data - covariance matrix ---")
    print(covariance_matrix)
    print("--- empirical data - eigenvalues ---")
    print(w)
    print("--- empirical data - eigenvectors ---")
    print(v)

    # prepare to collect data - empty 3-d array
    num_attributes = X.shape[1]
    self.eigenvectors = np.empty([0, num_attributes, num_attributes])

    # run the bootstrap
    print("--- run the bootstrap ---")
    bootstrap = Bootstrap.Bootstrap(X, s, B)
    bootstrap.add_callback(self.my_callback)
    [std, sem] = bootstrap.run()
    print("standard deviation:")
    print(std)
    print("standard error of the mean")
    print(sem)
    #assert(False)

    # investigate the results
    # plot the theta_stars (the measure) from the bootstrap replications
    # the expectation is this is somewhat gaussian (long tails are not acceptable)
    print("--- results from bootstrap ---")
    my_charts.plot_histogram(bootstrap.theta_star, "Count of Occurrences",
                             "Ratio: eigenV1/sum(eigen)", "Histogram - Count of EigenV1/sum")
    #assert(False)

    # plot the first two principal component vectors using box-and-whisker
    # we are looking for (lack of) variability
    print("first two principal components")
    print(self.eigenvectors.shape)
    my_charts.plot_box_and_whisker()
def test_treatment(self):
    treatment = np.array([94, 197, 16, 38, 99, 141, 23])
    s = self.my_s
    B = 100
    # look at the original data...
    print("Treatment sample size: ", treatment.shape[0], " mean: ", np.mean(treatment), "sem: ", stats.sem(treatment))
    # run the bootstrap
    ### this is incorrect - we actually should run the bootstrap
    ### on the DIFFERENCE between Treatment and control
    bootstrap = Bootstrap.Bootstrap(treatment, s, B)
    [std, sem] = bootstrap.run()
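# The note above is right: to compare two groups, each group should be resampled
# independently and the DIFFERENCE of the group means bootstrapped. A minimal
# numpy-only sketch of that idea (illustration only, not the Bootstrap class used
# above; the `control` values below are placeholders, not real data):
import numpy as np

def bootstrap_mean_difference(treatment, control, B=1000, rng=None):
    """Bootstrap distribution of mean(treatment) - mean(control)."""
    rng = np.random.default_rng() if rng is None else rng
    diffs = np.empty(B)
    for b in range(B):
        t_star = rng.choice(treatment, size=treatment.shape[0], replace=True)  # resample treatment
        c_star = rng.choice(control, size=control.shape[0], replace=True)      # resample control
        diffs[b] = t_star.mean() - c_star.mean()
    return diffs

# usage sketch (placeholder control group):
#   treatment = np.array([94, 197, 16, 38, 99, 141, 23])
#   control = np.array([10, 27, 31, 40, 46, 50, 52])
#   diffs = bootstrap_mean_difference(treatment, control, B=1000)
#   print(diffs.mean(), diffs.std(ddof=1))   # bootstrap estimate and standard error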
def test_single_variate(self):
    # this is a single variable expressed as a 2-d numpy array
    # (the attribute is column 0, rows are instances)
    num_instances = 100
    num_attributes = 1
    X = np.random.randint(5, size=(num_instances, num_attributes))
    s = np.mean
    B = 2
    bootstrap = Bootstrap.Bootstrap(X, s, B)
    bootstrap.run()
    assert bootstrap.N == num_instances
    assert bootstrap.B == B
def test_multi_variate(self):
    # this is a three-variable measure expressed as a 2-d numpy array
    # (the attributes are columns, rows are instances)
    num_instances = 6
    num_attributes = 3
    X = np.random.randint(5, size=(num_instances, num_attributes))
    s = np.mean
    B = 2
    bootstrap = Bootstrap.Bootstrap(X, s, B)
    bootstrap.run()
    assert bootstrap.N == num_instances
    assert bootstrap.B == B
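# The Bootstrap class exercised by these tests is not shown here. As a reference
# point, a minimal sketch of the plain nonparametric bootstrap they describe
# (resample the N instances with replacement B times, apply the statistic s to
# each replicate) -- an illustration under the same constructor signature, not
# the tested implementation; its [std, sem] return is only one plausible reading
# of that interface:
import numpy as np

class SimpleBootstrap(object):
    def __init__(self, X, s, B):
        self.X = np.asarray(X)
        self.s = s                    # statistic applied to each replicate
        self.B = B                    # number of bootstrap replications
        self.N = self.X.shape[0]      # number of instances (rows)
        self.theta_star = []          # statistic of each replicate

    def run(self, rng=None):
        rng = np.random.default_rng() if rng is None else rng
        for _ in range(self.B):
            idx = rng.integers(0, self.N, size=self.N)   # resample instances with replacement
            self.theta_star.append(self.s(self.X[idx]))
        theta = np.asarray(self.theta_star)
        std = theta.std(ddof=1)              # spread of the replicates
        sem = std / np.sqrt(self.B)          # standard error of their mean
        return [std, sem]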
def run(data, attribute_matrix):
    ntree = 20
    fixedSeed = 0
    seed = 7
    K = 10
    stats = []
    partitions = generate_partitions(data, K)
    for i in range(K):
        forest = []
        print("Running K = " + str(i))
        # generate cross-validation training (K-1) and evaluation (1) partitions
        training = []
        for p in range(K):
            if p != i:
                training = training + partitions[p]
        evaluation = partitions[i]
        # run the bootstrap and create each decision tree of the forest
        for t in range(ntree):
            training_set = bs.generate_training_set(training, fixedSeed)
            #test_set = bs.generate_test_set(training, training_set)
            fixedSeed += len(data)
            decisionTree = dt.DecisionTree()
            # m attributes are used
            #reduced_matrix = vd.select_m_attributes(attribute_matrix) -- this is done inside select_node_id
            seed += len(data)
            dt.select_node_id(decisionTree, training_set, attribute_matrix, False)
            dt.add_branch(decisionTree, training_set, attribute_matrix)
            dt.split_examples(decisionTree, training_set, attribute_matrix, False)
            #print("root attribute selected:" + decisionTree.node_id)
            #dt.print_tree(decisionTree)
            forest.append(decisionTree)
        all_classes = ut.get_classes(attribute_matrix)
        #ut.evaluate_tree(decisionTree, test_set, all_classes)
        stats.append(ut.evaluate_forest(forest, evaluation, all_classes))
    ut.print_stats(stats, all_classes)
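# bs.generate_training_set is not shown here. A common way to build each tree's
# bootstrap training set is to draw len(training) examples from the training fold
# with replacement, seeded for reproducibility. A hypothetical sketch of that idea
# (the name and behavior are assumptions, not the project's actual implementation):
import random

def generate_training_set_sketch(training, seed):
    rng = random.Random(seed)        # seeded so each tree's sample is reproducible
    n = len(training)
    # sample n examples with replacement; on average roughly 63% of distinct examples appear
    return [training[rng.randrange(n)] for _ in range(n)]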
def initConnections():
    global db
    global admindb
    global occpancydb
    global sysconfigdb
    global capturedb
    if "_dbConnectionsInitialized" not in globals():
        global _dbConnectionsInitialized
        _dbConnectionsInitialized = True
        mongodb_host = Bootstrap.getDbHost()
        client = MongoClient(mongodb_host)
        # Let the connection initialize
        time.sleep(1)
        db = client.spectrumdb
        admindb = client.admindb
        sysconfigdb = client.sysconfig
        occpancydb = client.occpancydb
        capturedb = client.capturedb
def main():
    parser = ArgumentParser(description='Sequential Bootstrap Experiments')
    parser.add_argument('input_file', metavar='input_file',
                        help='a yaml file specifying the required details')
    parser.add_argument('output_file', metavar='output_file',
                        help='output json suitable for use with the plotting script')
    args = parser.parse_args()
    input = yaml.safe_load(open(args.input_file))
    results = [{s: {} for s in input['stdevs']} for i in range(input['num_games'])]
    for i in range(input['num_games']):
        print i
        base_game = yaml_builder.construct_game(input['game'])
        stopping_rule = yaml_builder.construct_stopping_rule(input['stopping_rule'], base_game)
        for stdev in input['stdevs']:
            noise_model = yaml_builder.construct_model(stdev, input['noise_model'])
            matrix, equilibria = add_noise_sequentially(base_game, noise_model,
                                                        stopping_rule, input['samples_per_step'])
            sample_game = matrix.toGame()
            results[i][stdev][0] = [{"profile": eq,
                                     "statistic": Regret.regret(base_game, eq),
                                     "bootstrap": Bootstrap.bootstrap(sample_game, eq, Regret.regret,
                                                                      "resample", ["profile"]),
                                     "sample_count": sample_game.max_samples} for eq in equilibria]
    f = open(args.output_file, 'w')
    f.write(IO.to_JSON_str(results, indent=None))
def cleanLogs():
    flaskLogDir = Bootstrap.getFlaskLogDir()
    os.remove(flaskLogDir + "/" + "spectrumbrowser.log")
BOOTSTRAP_N = 20        # number of bootstrap samples (YOU CAN PLAY AROUND WITH THIS)
DATA_START_INDEX = 1    # account for df's named index column 0 (DON'T CHANGE THIS UNLESS YOUR DATASET NEEDS IT)
DO_K_SWEEP = True       # switch to do a sweep of K values using K-means to find the optimal K
OPTIMAL_K = 3           # Iris dataset has 3 clusters (ground truth); change this for different datasets

# import data
iris = datasets.load_iris()
df = pd.DataFrame(data=np.c_[iris['data']], columns=iris['feature_names'])

# prepare data (add index column 'flower')
prep = Prepare('flower', len(df)).names_join(df)
df = prep['df']
labels = prep['labels']

# generate bootstrap samples
bts = Bootstrap(df, BOOTSTRAP_SIZE, BOOTSTRAP_N).get_bootstraps()

# determine optimal clustering K
kmeans = Bootstrap.kmeans_bootstrap(bts, DO_K_SWEEP, BOOTSTRAP_N, DATA_START_INDEX, OPTIMAL_K, MAX_K)

# max k determined above becomes optimal k
gmm = RunAlgos(3, BOOTSTRAP_N, DATA_START_INDEX, bts, kmeans).run_GMM()
agglomerative = RunAlgos(3, BOOTSTRAP_N, DATA_START_INDEX, bts, kmeans).run_Agglomerative()
kmeans_ = RunAlgos(3, BOOTSTRAP_N, DATA_START_INDEX, bts, kmeans).run_KMeans()

# consensus clustering
cc_init = Consensus(kmeans_, gmm, agglomerative, bts, df, DATA_START_INDEX, 3, labels)
mats = cc_init.combine_results()
# This software is provided "AS IS."
# NIST MAKES NO WARRANTY OF ANY KIND, EXPRESS, IMPLIED
# OR STATUTORY, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTY OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT
# AND DATA ACCURACY. NIST does not warrant or make any representations
# regarding the use of the software or the results thereof, including but
# not limited to the correctness, accuracy, reliability or usefulness of
# this software.
'''
Created on Jun 8, 2015

@author: local
'''
import Bootstrap
Bootstrap.setPath()
import signal
import Config
import util
import argparse
import socket
import DataStreamSharedState
from DataStreamSharedState import MemCache
import os
import traceback
import sys
import struct
from io import BytesIO
import binascii
from bson.json_util import dumps
import authentication
import sys

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-t")
    parser.add_argument("-p")
    args = parser.parse_args()
    testDataLocation = args.t
    prefix = args.p
    config = setupdefs.parse_msod_config()
    sys.path.append(config["SPECTRUM_BROWSER_HOME"] + "/services/common")
    import Bootstrap
    Bootstrap.setPath()
    setupdefs.setupSensors(prefix)
    if not os.path.exists(testDataLocation):
        print "Please put the test data at ", testDataLocation
        os._exit(0)
    import populate_db
    if not os.path.exists(testDataLocation + "/LTE_UL_DL_bc17_bc13_ts109_p1.dat"):
        print("File not found " + testDataLocation + "/LTE_UL_DL_bc17_bc13_ts109_p1.dat")
    else:
        populate_db.put_data_from_file(testDataLocation + "/LTE_UL_DL_bc17_bc13_ts109_p1.dat")
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT
# AND DATA ACCURACY. NIST does not warrant or make any representations
# regarding the use of the software or the results thereof, including but
# not limited to the correctness, accuracy, reliability or usefulness of
# this software.
#! /usr/local/bin/python2.7
# -*- coding: utf-8 -*-
'''
Created on Jun 24, 2015

@author: mdb4
'''
import Bootstrap
Bootstrap.setPath()
Bootstrap.setAdminPath()
import util
import argparse
from ResourceDataSharedState import MemCache
import psutil
import sys
import socket
import Config
import netifaces
import Log
import time
import MemCacheKeys
import logging
import pwd
import os
def initialize(self, map_name):
    return Bootstrap.bootstrap_pygame(self.map, map_name)
def main(args):
    global app
    import Bootstrap
    # bind the application instance to the module-level name `app`
    app = Bootstrap.Bootstrap(args)
    app.exec_()
def getPath(x):
    flaskRoot = Bootstrap.getSpectrumBrowserHome() + "/flask/"
    return flaskRoot + x
def generate_bootstraps(self):
    # bootstrapping
    for n in range(0, self.nTree):
        b = Bootstrap()
        b.generate(self.original_dataset)
        self.bootstraps.append(b)
    return self.bootstraps
suff = "" tlsdata = LF.ImportData("Data/tls.tot" + suff) tlsdata, nPts = LF.CleanData(tlsdata, constG=constG, constY=constY, constT=constT) original = LF.calculateLossFunction(tlsdata, density, omega, tMatrix, BoltzmannCorrection=BoltzmannCorrection) shuffleIndices = [0, 3, 4, 5, 6, 7, 10] tlskde = BS.tlsKDE() tlskde.fit(tlsdata, shuffleIndices, bandwidth=0.186379686016) losses = [] newtlsdata = np.copy(tlsdata) for bi in xrange(0, bSamples): print "Bootstrapping: working on the {:d} step of {:d}.".format( bi + 1, bSamples) newtlsdata[:, shuffleIndices] = tlskde.sample(nPts) losses.append( LF.calculateLossFunction(newtlsdata, density, omega, tMatrix, BoltzmannCorrection=BoltzmannCorrection))
def initialize(self):
    Bootstrap.bootstrap_pygame(self.map)
# Form the data into a complex array
FArr = numpy.array(F)
RArr = numpy.array(R)
ImArr = numpy.array(Im) * 1j
TotArr = RArr + ImArr

# Fit the data
fit_result, ParamNames = fit.custom_fitting(F, TotArr, params)
Fitted_variables = fit_result.x

# Obtain the residuals
residuals = fit.res_vec(Fitted_variables, FArr, TotArr)

# Bootstrap to get the error and generate the final model
boot_params, corr = boot.strap(residuals, FArr, TotArr, Fitted_variables, ParamNames)
result = cir.Z(boot_params, FArr, modelname)
boot_generation = result.z
Real_Boot_Fit = boot_generation.real
Imag_Boot_Fit = boot_generation.imag

# Phase angle of the fitted model (note: numpy.arctan returns radians)
Thetas_Fit = []
i = 0
for x in Real_Boot_Fit:
    theta = numpy.arctan(Imag_Boot_Fit[i] / Real_Boot_Fit[i])
    Thetas_Fit = Thetas_Fit + [theta]
    i = i + 1

### Let's output this data in a csv ###
ImArrp = numpy.array(Im)
EISData = list(zip(FArr, RArr, -ImArrp, thetas, Real_Boot_Fit, -Imag_Boot_Fit, Thetas_Fit))
EISdf = pd.DataFrame(data=EISData,
                     columns=['F(Hz)', 'R(ohm)', '-Im(ohm)', 'Theta(degrees)',
                              'Fit R(ohm)', 'Fit -Im(ohm)', 'Fit Theta(degrees)'])
# AND DATA ACCURACY. NIST does not warrant or make any representations
# regarding the use of the software or the results thereof, including but
# not limited to the correctness, accuracy, reliability or usefulness of
# this software.
'''
Created on Feb 2, 2015

@author: local
'''
import os
import logging
import memcache
import Bootstrap
import util
from Defines import STATIC_GENERATED_FILE_LOCATION

sbHome = Bootstrap.getSpectrumBrowserHome()
debug = True
disableAuthentication = False   # SET This to True for testing.
disableSessionIdCheck = False   # SET This to False for testing.
# Set this to True when generating test cases.
generateTestCase = False
# Note: In production we will set this to True
debugRelaxedPasswords = False
# File path to where the unit tests will be generated.
# Change this to where you want to generate unit tests.
if "mc" not in globals():
    mc = memcache.Client(['127.0.0.1:11211'], debug=0)