Example #1
def pipeline(path, start, end):
    moni = Monitor.Monitor(path)
    moni.creat_json_file()
    anchor = start
    while anchor < end:
        # print(anchor, end)
        if anchor == 0:
            GeoOPt.geo_opt(path, moni)
        elif anchor == 1:
            if start == 1:
                HF1.hf1_start(path, moni)
            else:
                HF1.hf1(path, moni)
        elif anchor == 2:
            Localization.localization(path, moni)
        elif anchor == 3:
            HF2.hf2(path, moni)
        elif anchor == 4:
            LMP2.lmp2(path, moni)
            if if_skip_rpa() == 1:
                anchor += 1
        elif anchor == 5:
            RPA.rpa(path, moni)
        elif anchor == 6:
            Cluster.cluster(path)
        elif anchor == 7:
            Correction.correction(path, moni)
        elif anchor == 8:
            Results.results(path)
        anchor += 1
    end_programm(path)
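
A hedged sketch of a table-driven variant of the dispatch above. The stage modules (GeoOPt, HF1, LMP2, and so on) are the ones named in the snippet and are assumed to be in scope; stages 1 and 4 keep their special cases:

STAGES = {
    0: lambda path, moni: GeoOPt.geo_opt(path, moni),
    2: lambda path, moni: Localization.localization(path, moni),
    3: lambda path, moni: HF2.hf2(path, moni),
    5: lambda path, moni: RPA.rpa(path, moni),
    6: lambda path, moni: Cluster.cluster(path),
    7: lambda path, moni: Correction.correction(path, moni),
    8: lambda path, moni: Results.results(path),
}

def run_stage(anchor, start, path, moni):
    # Run one stage; return 1 if the following stage should be skipped.
    if anchor == 1:
        (HF1.hf1_start if start == 1 else HF1.hf1)(path, moni)
    elif anchor == 4:
        LMP2.lmp2(path, moni)
        if if_skip_rpa() == 1:
            return 1  # skip the RPA stage, as in the original elif chain
    else:
        STAGES[anchor](path, moni)
    return 0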
Example #2
def ativabotoes(botoes_jogo, errados, alterados, jogo, mat, start_time, times,
                n_select, linha, coluna, ecra, tempo, best_results,
                dificuldade):
    result = False

    if botoes_jogo[0]:
        errados = Logic.constroi_errados(alterados, jogo, mat)
        start_time = time.time()
        botoes_jogo[0] = False
    else:
        actual_time = time.time() - start_time
        if actual_time >= times:
            errados = []

    if botoes_jogo[1]:
        jogo = Logic.copia_matriz(mat)
        botoes_jogo[1] = False

    if botoes_jogo[2] and n_select:
        jogo[linha][coluna] = mat[linha][coluna]
        botoes_jogo[2] = False

    if botoes_jogo[3]:
        ecra = 5
        tempo = time.time() - tempo
        result = Print.correto(jogo)
        if result:
            Results.adiciona_lista(tempo, dificuldade, best_results)
        Results.gravar_resultados(best_results)
        botoes_jogo[3] = False

    return errados, start_time, jogo, ecra, tempo, best_results, result
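
A hedged usage sketch for the function above; the argument names are taken from its signature, and the surrounding game loop that owns these variables is assumed:

errados, start_time, jogo, ecra, tempo, best_results, result = ativabotoes(
    botoes_jogo, errados, alterados, jogo, mat, start_time, times,
    n_select, linha, coluna, ecra, tempo, best_results, dificuldade)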
Example #3
    def query(self, query):

        if query is None:
            return Results.list_commands()

        # print query

        query_segments = query.lower().split(" ", 1)
        command = query_segments[0]

        if len(query_segments) == 2:
            args = query_segments[1]
        else:
            args = None

        command_suggestions = QueryParser.parse_command(command)

        print(command_suggestions)

        if args is not None and len(command_suggestions) == 1:
            if not self.ensure_connection():
                return
            return Results.list_music(self.__client, self.__album_art_cache,
                                      command_suggestions[0], args)
        else:
            return Results.list_commands(command_suggestions)
Example #4
def load_model(model_filename):
    print(model_filename)
    try:
        hdf5_route = "/highest_validation_likelihood/parameters"
        params = Results.Results(model_filename).get(hdf5_route)
    except Exception:  # fall back to the final model if no best-validation snapshot was saved
        hdf5_route = "/final_model/parameters"
        params = Results.Results(model_filename).get(hdf5_route)
    model_class = getattr(NADE, params["__class__"])
    return model_class.create_from_params(params)
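
The try/except fallback above generalizes to any list of candidate HDF5 routes. A minimal sketch, assuming only that get() raises when a route is absent; get_first_available is an illustrative helper, not part of the Results API:

def get_first_available(results, routes):
    # Try each candidate route in order and return the first that resolves.
    for route in routes:
        try:
            return results.get(route)
        except Exception:
            continue
    raise KeyError("no route found among %r" % (routes,))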
Example #5
    def __init__(self, options):
        """ Constructor.  Take the command line options as a parameter """
        self.options = options

        # Initialize some instance vars
        self.consumer_config = None
        self.config = None
        self.logger = None
        self.proxy = None

        # For any messages that won't go through the logger
        self.quiet = 0
        if self.options.verbose == 0:
            self.quiet = 1

        # Instantiate our helper objects
        self.sysutils = Sysutils.Sysutils(self)
        self.results = Results.Results(self, options)

        # Setup the logger
        self.init_logging(self.options.verbose)

        # Setup the initial configuration
        self.setup_config()
        self.setup_consumer_config()
        return
Example #6
 def __init__(self,
     # Set an error value
     error: float,
     # Set the k number of neighbors
     k: int,
     # Pass in the data type
     data_type: str,
     # Set the categorical features
     categorical_features: list,
     # Whether this data set is a regression data set
     regression_data_set: bool,
     # Set the alpha float
     alpha: float,
     # Set the beta float
     beta: float,
     # Set the kernel width value
     h: float,
     # Set the dimensionality
     d: int):
     # initialize a knn object
     self.knn = kNN.kNN(k, data_type, categorical_features, regression_data_set, alpha, beta, h, d)
     self.nn = kNN.kNN(1, data_type, categorical_features, regression_data_set, alpha, beta, h, d)
     # error threshold for regression classification
     self.error = error
     # store if this data set is a regression data set (True) or not (False)
     self.regression_data_set = regression_data_set
     self.results = Results.Results()
Example #7
def calc(datasetIndex, multiplierInt):
    csv = pd.DataFrame(columns=['dataset', 'bins', 'f1', 'zero-one'])
    exp = ((multiplierInt + 1) / 2)
    bins = math.ceil(2**exp)
    results = []
    for k in range(trials):
        dp = DataProcessor.DataProcessor(bin_count=bins)
        binnedDataset = dp.StartProcess(datasets[datasetIndex])
        N, Q, F, testData = train(binnedDataset)

        model = Classifier.Classifier(N, Q, F)
        classifiedData = model.classify(testData)

        stats = Results.Results()
        zeroOne = stats.ZeroOneLoss(classifiedData)
        macroF1Average = stats.statsSummary(classifiedData)
        datapoint = {
            'dataset': dataset_names[datasetIndex],
            'bins': bins,
            'f1': macroF1Average,
            'zero-one': zeroOne / 100
        }
        print(datapoint)
        csv = csv.append(datapoint, ignore_index=True)
        # trial = {"zeroOne": zeroOne, "F1": macroF1Average}
        # results.append(trial)
        # print(trial)
    data.append(csv)
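
DataFrame.append, used above, was deprecated in pandas 1.4 and removed in 2.0. A hedged alternative: append each datapoint dict to a plain list inside the loop and build the frame once afterwards.

import pandas as pd

def frame_from_datapoints(datapoints):
    # datapoints: a list of dicts shaped like the datapoint dict above
    return pd.DataFrame(datapoints, columns=['dataset', 'bins', 'f1', 'zero-one'])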
Example #8
 def __init__(self, connections, main_connection, recording):
     self.connections = connections
     self.main_connection = main_connection
     self.results = Results.Results()
     self.recording = recording
     self.standby = Standby.Standby()
     self.target_identification = TargetIdentification.TargetIdentification(
         self.connections, self.results, self.standby)
     self.message_counter = 0
Example #9
 def showResults(self):
     self.loadData()
     self.__calc.compute()
     gg = self.__calc.getGG()
     pg = self.__calc.getPG()
     tg = self.__calc.getTG()
     gb = self.__calc.getGB()
     pb = self.__calc.getPB()
     tb = self.__calc.getTB()
     neg = self.__calc.getNEG()
     nke = self.__calc.getNKE()
     nkyp = self.__calc.getNKYP()
     degp = self.__calc.getDEGP()
     sgp = self.__calc.getSGP()
     nkyr = self.__calc.getNKYR()
     debp = self.__calc.getDEBP()
     sbp = self.__calc.getSBP()
     fgp = self.__calc.getFGP()
     fbp = self.__calc.getFBP()
     fsum = self.__calc.getFSUM()
     tgres = self.__calc.getTGRES()
     tbres = self.__calc.getTBRES()
     dltpg = self.__calc.getDLTPG()
     dpg = self.__calc.getDPG()
     dltpb = self.__calc.getDLTPB()
     dpb = self.__calc.getDPB()
     qogp = self.__calc.getQOGP()
     qbp = self.__calc.getQBP()
     ril = self.__calc.getReinoldsInputColdThermofor()
     rihg = self.__calc.getReinoldsInputHotThermofor()
     resr = self.__calc.getRESR()
     rebsr = self.__calc.getREBSR()
     mat = self.__calc.getMAT()
     betact1 = self.__calc.getBETACT1()
     cped1 = self.__calc.getCPED1()
     cped2 = self.__calc.getCPED2()
     count = self.__calc.getCOUNT()
     alfag = self.__calc.getHeatCoefColdThermofor()
     alfab = self.__calc.getHeatCoefHotThermofor()
     nusseltg = self.__calc.getNusseltColdThermofor()
     nusseltb = self.__calc.getNusseltHotThermofor()
     prandtlg = self.__calc.getPrandtlColdThermofor()
     prandtlb = self.__calc.getPrandtlHotThermofor()
     self.__resDialog = Results.Results(self.__calc.getCOE())
     self.__resDialog.updateContent(gg, pg, tg, gb, pb, tb, neg, nke, nkyp, degp, sgp, nkyr, debp, sbp, fgp, fbp,
                                    fsum, tgres, tbres, dltpg, dpg, dltpb, dpb, qogp, qbp, ril, rihg, resr, rebsr,
                                    mat, betact1, cped1, cped2, count, alfag, alfab, nusseltg, nusseltb, prandtlg,
                                    prandtlb)
     self.__resDialog.show()
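
The block of getter calls above can be collapsed with getattr. A hedged sketch; GETTERS lists only the first few names from the snippet and would need to be completed in the same order the dialog expects:

GETTERS = ['getGG', 'getPG', 'getTG', 'getGB', 'getPB', 'getTB']  # subset shown

def collect(calc, names=GETTERS):
    # Call each named zero-argument getter on calc, preserving order.
    return [getattr(calc, name)() for name in names]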
Example #10
 def __init__(self):
     self.main_connection = ConnectionPostOfficeEnd.MainConnection()
     self.connections = MasterConnection.MasterConnection()
     self.options = None
     self.results = Results.Results()
     self.standby_state = None
     self.standby_freq = None
     self.no_standby = None
     self.recorded_signals = [None for _ in range(7)]
     self.prev_results = []
     self.prev_results_counter = {}
     self.actual_results = []
     self.actual_results_counter = {}
     self.need_new_target = None
     self.message_counter = None
     self.waitConnections()
Example #11
def main():
    print "DEBUG: Entering Driver.main()"

    expGUI = ExperimentGUI() # create new ExperimentGUI object using default values    

    res = Results() # create new Results object using default value
    res.writeToFile(expGUI.window.get_title() + " Experiment\n")
    res.writeToFile("Experiment started at: " + str(datetime.now()) + "\n\n")

    gtk.main()

    # http://www.pygtk.org/dist/pygtk2-tut.pdf
    
    catA = ImageCategory("A", ["A0.jpg", "A1.jpg", "A2.jpg", "A3.jpg", "A4.jpg", "A5.jpg"])
    catB = ImageCategory("B", ["B0.jpg", "B1.jpg", "B2.jpg", "B3.jpg", "B4.jpg"])

    categories = [catA, catB]
    
    lb1 = LearningBlock(["A0.jpg", "A1.jpg", "B2.jpg"], 10.0)
    lb2 = LearningBlock(["B3.jpg", "B4.jpg", "A1.jpg"], 15.0)
    tb1 = TestingBlock(["A0.jpg", "A1.jpg", "B2.jpg"])
    tb2 = TestingBlock(["A3.jpg", "B4.jpg"])

    lblockList = [lb1, lb2]
    tblockList = [tb1, tb2]
    
    lp = LearningPhase(lblockList)
    tp = TestingPhase(tblockList)
    phaseList = [lp, tp]
    exp = Experiment(phaseList)
    exp.runPhases(categories, res)
    
    print "DEBUG: Entering gtk.main()"
    
    #gtk.main()
    
    print "DEBUG: Exiting gtk.main()"

    res.writeToFile("Experiment ended at: " + str(datetime.now()))

    print "DEBUG: Exiting Driver.main()"
Example #12
 def run(self, screen):
     self.start(screen)
     while not self.gameover:
         self.move(screen)
         pygame.display.flip()
         if self.Output.score % 1720 == 0:
             self.update_seeds()
         if self.Output.dead == 1:
             self.gameover = True
             with open('nickname.txt', 'r') as f:
                 self.Highscore.insert(f.readline(), self.Output.scorenum)
                 with open('nickname.txt', 'w') as f1:
                     f1.write('\n')
             res = Results.Results()
             res.draw(screen)
             game = True
             while game:
                 for events in pygame.event.get():
                     if events.type == pygame.KEYDOWN:
                         if events.key == pygame.K_ESCAPE or events.key == pygame.K_SPACE:
                             game = False
                     elif events.type == pygame.QUIT:
                         game = False
     sys.exit()
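
The inner dismissal loop above polls events continuously. A hedged sketch of the same behaviour using pygame.event.wait(), which blocks instead of spinning:

import pygame

def wait_for_dismiss():
    # Block until ESC or SPACE is pressed, or the window is closed.
    while True:
        event = pygame.event.wait()
        if event.type == pygame.QUIT:
            return
        if event.type == pygame.KEYDOWN and event.key in (pygame.K_ESCAPE, pygame.K_SPACE):
            return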
Example #13
def main(argv):
    testname = ''

    HlayerSize = 100
    HlayerCount = 2

    nsplits = 3

    try:
        # getopt short options are single characters, so the multi-letter
        # flags are taken as long options here
        opts, args = getopt.getopt(argv, "ht:", ["tname=", "hls=", "hlc=", "n="])
    except getopt.GetoptError:
        print 'test.py -t <testname> --hls <Hidden Layer Size> --hlc <Hidden Layer Count> --n <nsplits>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'test.py -t <testname> --hls <Hidden Layer Size> --hlc <Hidden Layer Count> --n <nsplits>'
            sys.exit()
        elif opt in ("-t", "--tname"):
            testname = arg
        elif opt == "--hls":
            HlayerSize = int(arg)
        elif opt == "--hlc":
            HlayerCount = int(arg)
        elif opt == "--n":
            nsplits = int(arg)

    Hlayer = [HlayerSize] * HlayerCount

    NB = nb.NaiveBayes(testname=testname, subtestname='naivebayes')
    X, Y = NB.loadMatrixFromFile()
    res.getResults(NB)

    RF = rf.RandomForest(testname=testname, subtestname='randomforest')
    X, Y = RF.loadMatrixFromFile()
    res.getResults(RF)

    NN = nn.NeuralNetwork(testname=testname, subtestname='neuralnetwork', HlayerSizes=Hlayer, nsplits=nsplits)
    X, Y = NN.loadMatrixFromFile()
    res.getResults(NN)
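
getopt cannot express multi-letter short options such as -hls, which is why the original parsing was broken. A hedged argparse equivalent of the corrected option handling above:

import argparse

def parse_args(argv):
    parser = argparse.ArgumentParser(prog='test.py')
    parser.add_argument('-t', '--tname', default='', dest='testname')
    parser.add_argument('--hls', type=int, default=100, help='hidden layer size')
    parser.add_argument('--hlc', type=int, default=2, help='hidden layer count')
    parser.add_argument('-n', '--nsplits', type=int, default=3)
    return parser.parse_args(argv)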
Example #14
# Initializing NN
deep_font = DeepFont(shape + (3, ), opt_name='sgd', use_augmentations=True)
deep_font.summarize()

# Training
model_filename_full = model_filename + '.h5'
results = deep_font.train(train_x, train_y, epochs, batch_size, validate_x,
                          validate_y)
print('Model Loss: {L} ; Accuracy: {A}'.format(L=results['evaluation'][0],
                                               A=results['evaluation'][1]))
deep_font.save(model_filename)

plt.figure('Training results: Accuracy & Loss')
for k in results['history']:
    #if k.find('accuracy') != -1:
    plt.plot(results['history'][k], label=k)
plt.legend()
plt.title('Training results: Accuracy & Loss')
plt.show()

if len(validation_filenames) > 0:
    # Executing evaluation
    predictions = deep_font.predict(validate_x)

    # Printing & saving results
    evaluation = deep_font.evaluate(validate_x, validate_y)
    print('Validation Loss: {L} ; Accuracy: {A}'.format(L=evaluation[0],
                                                        A=evaluation[1]))
    Results.store(validate_results_file, predictions, validate_filenames,
                  validate_letters)
               "\n")
print "Running %s runs each for %s samples and %s cols" % (num_runs, sample_range, col_range)

app.create_R(out_dir)
all_results = []
completed = 0
smallest_max_otu_value = app.compute_smallest_max()
for i in range(len(col_range)):
    num_cols = col_range[i]
    taxa_tree = app.create_tree(app.find_usable_length(num_cols, bits) / bits, type = 'T')
    assert app.is_binary_tree(taxa_tree)
    for j in range(len(sample_range)):
        num_samples = sample_range[j]
        sample_trees = [None] * num_runs
        for dist in dists:
            results = Results(num_samples, num_cols, dist)
            all_results.append(results)
            ranges = None
            if dist == 'normal':
                ranges = app.get_range_from_normal(num_cols, bits, mean, sd, smallest_max_otu_value)
            else:
                ranges = app.get_range_from_gamma(num_cols, bits, gamma_shape, gamma_scale, smallest_max_otu_value)
            for k in range(num_runs):
                status = "%d/%d: running %d samples and %d cols (%s) %s" % (
                    completed + 1, num_runs * len(dists) * len(sample_range) * len(col_range), num_samples, num_cols,
                    dist, str(timer))

                print status
                log_file.write("%s\n" % status)
                log_file.flush()
Example #16
def main():
    #What trial number we are on
    Trial = 0
    #Which set of the data is being used to test
    TestData = 0
    #Print data to the screen so the user knows the program is starting
    print("Program Starting")
    #Preprocessed datasets stored in an array for iteration and experiments, noise included
    data_sets = [
        'PreProcessedVoting.csv', 'PreProcessedIris.csv',
        'PreProcessedGlass.csv', 'PreProcessedCancer.csv',
        'PreProcessedSoybean.csv', 'PreProcessedVoting_Noise.csv',
        'PreProcessedIris_Noise.csv', 'PreProcessedGlass_Noise.csv',
        'PreProcessedCancer_Noise.csv', 'PreProcessedSoybean_Noise.csv'
    ]
    #The 5 Data set names are stored in the array, Noise included
    dataset_names = [
        "Vote", "Iris", "Glass", "Cancer", "Soybean", "Vote_Noise",
        "Iris_Noise", "Glass_Noise", "Cancer_Noise", "Soybean_Noise"
    ]

    ####################################################### MACHINE LEARNING PROCESS #####################################################
    #Set the total number of runs to be 10
    TotalRun = 10
    #Create a dataframe that is going to hold key values from the experiment
    finalDataSummary = pd.DataFrame(columns=["Dataset", "F1", "ZeroOne"])
    #For each of the datasets and the data sets including noise
    for dataset in data_sets:
        #Create an empty array
        AvgZeroOne = []
        #Create a second empty array
        AvgF1 = []
        #Get the name of the dataset being experimented on
        datasetName = dataset_names[data_sets.index(dataset)]
        #Print the dataset name so the user knows what data set is being experimented
        print(datasetName)

        #Load in the dataframe from the preprocessed data
        df = pd.read_csv(dataset)
        #Create a Training algorithm Object
        ML = TrainingAlgorithm.TrainingAlgorithm()
        #Bin the data frame into a list of 10 similarly sized dataframes for training and one set to test
        tenFoldDataset = ML.BinTestData(df)

        #Set the total number of runs to be 10 for now
        for i in range(10):
            #Set an empty dataframe object
            TrainingDataFrame = pd.DataFrame()
            #Make One dataframe that is our test Dataframe
            TestingDataFrame = copy.deepcopy(tenFoldDataset[i])
            #For each of the dataframes generated above
            for j in range(10):
                #If the dataframe being accessed is the test dataframe
                if i == j:
                    #Skip it
                    continue
                #Append the training dataframe to one dataframe to send to the ML algorithm
                TrainingDataFrame = TrainingDataFrame.append(copy.deepcopy(
                    tenFoldDataset[j]),
                                                             ignore_index=True)

            # print('************************************************')
            # print(TrainingDataFrame)
            # print(TestingDataFrame)
            # print('************************************************')

            # calculate the N, Q, and F probabilities
            N, Q, F = train(ML, TrainingDataFrame)
            #Create a Classifier Object to classify our test set
            model = Classifier.Classifier(N, Q, F)
            #Reassign the testing dataframe to the dataframe that has our Machine learning classification guesses implemented
            classifiedDataFrame = model.classify(TestingDataFrame)
            #Create a Results object
            Analysis = Results.Results()

            #Run the 0/1 Loss function on our results
            zeroOnePercent = Analysis.ZeroOneLoss(classifiedDataFrame)
            #Get the F1 score for the given dataset
            macroF1Average = Analysis.statsSummary(classifiedDataFrame)
            #Print the zero one loss and F1 calculation to the screen
            print("Zero one loss: ", zeroOnePercent, "F1: ", macroF1Average)

            print("\n")
            #append the zero one loss and F1 average to the list to calculate the average score
            AvgZeroOne.append(zeroOnePercent)
            AvgF1.append(macroF1Average)

            #Increment the trial number and the position in the array to use the dataframe to test on
            Trial += 1
            TestData += 1
            #Only 10 dataframes (0 - 9) exist in the array, so wrap back to the beginning after the 10th trial
            if TestData == 10:
                #Set the value to 0
                TestData = 0
        #Gather the dataset name the average scores for ZOloss and F1 score and put them into a data structure
        AvgStats = {
            "Dataset": datasetName,
            "F1": sum(AvgF1) / len(AvgF1),
            "ZeroOne": sum(AvgZeroOne) / len(AvgZeroOne)
        }
        #Set a variable to hold all of the statistics for each of the trials so we can print them to one file
        finalDataSummary = finalDataSummary.append(AvgStats, ignore_index=True)
        #Write the data set, the trial number and statistics of a trial to be printed to a file
        WriteToAFile(datasetName, AvgStats, Trial)
    finalDataSummary.to_csv("ExperimentalSummary.csv")
    print("Program Finish: \n", finalDataSummary)
Example #17
def CmdLine( args ):	
	seriesFileName = None
	if args.series:
		seriesFileName = args.series
		try:
			with open(seriesFileName, 'rb') as fp:
				SeriesModel.model = pickle.load( fp )
		except IOError:
			print u'cannot open series file "{}".'.format(seriesFileName)
			return 1
		SeriesModel.model.postReadFix()

	races = []
	for r in args.races:
		# Parse the points structure.
		pos = r.rfind( '=' )
		if pos >= 0:
			pointStructuresName = r[pos+1:].strip()
			r = r[:pos]
		else:
			pointStructuresName = None

		fileName, sheetName = None, None
		
		# Now, parse the file reference.
		if r.endswith( '.cmn' ):
			# This is a crossmgr file.
			fileName = r
		else:
			# This must be a spreadsheet.
			components = r.split( '::' )
			if len(components) == 2:
				fileName, sheetName = components
			else:
				fileName = components[0]
			if not any( fileName.endswith(suffix) for suffix in ('.xlsx', '.xlsm', '.xls') ):
				print u'unrecognized file suffix "{}".'.format(fileName)
				return 2
				
		pointStructures = None
		for ps in SeriesModel.model.pointStructures:
			if pointStructuresName is None or ps.name == pointStructuresName:
				pointStructures = ps
				break
				
		if pointStructures is None:
			print u'cannot find points structure "{}".'.format(pointStructuresName)
			return 3
		
		races.append( SeriesModel.Race(fileName, pointStructures) )
		
	if races:
		SeriesModel.model.races = races
			
	score_by_points, score_by_time, score_by_percent = True, False, False
	if args.score_by_time:
		score_by_points, score_by_time, score_by_percent = False, True, False
	if args.score_by_percent:
		score_by_points, score_by_time, score_by_percent = False, False, True
		
	output_file = args.output or ((os.path.splitext(args.series)[0] + '.html') if args.series else 'SeriesMgr.html')
	with open( output_file, 'wb' ) as f:
		f.write( Results.getHtml(seriesFileName) )
	
	return 0
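
The race-reference parsing above handles strings of the form file[::sheet][=points]. A hedged sketch isolating the same splitting rules into a helper; the function name is illustrative:

def parse_race_reference(r):
    # Split 'file[::sheet][=points]' into its parts, mirroring the logic above.
    pointStructuresName = None
    pos = r.rfind('=')
    if pos >= 0:
        pointStructuresName = r[pos+1:].strip()
        r = r[:pos]
    fileName, sheetName = r, None
    if not r.endswith('.cmn'):
        components = r.split('::')
        if len(components) == 2:
            fileName, sheetName = components
        else:
            fileName = components[0]
    return fileName, sheetName, pointStructuresName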
Example #18
if (__name__ == '__main__'):
    try:
        opts, args = getopt.getopt(sys.argv[1:], "ve", ["verbose", "errors"])
    except getopt.GetoptError, err:
        # print help information and exit:
        print str(err) # will print something like "option -a not recognized"
        sys.exit(2)
    verbose = False
    list_errors = False
    for o, a in opts:
        if o in ("-v", "--verbose"):
            verbose = True
        elif o in ("-e", "--errors"):
            list_errors = True
    s = Simulation('Estimator', verbose)
    r = Results(s, verbose)
    ###########################################
    ###########################################
    # Set type of simulations that need to be run
    elist = {}
    ###########################################
    elist['Cyclic'] = [['1']]
    #elist['Cyclic'][0] += ['mhop']
    elist['Cyclic'][0] += [ ['seq', 4, 5, 6] ]
    ###########################################
    #elist['Cyclic'] += [['1']]
    #elist['Cyclic'][1] += ['mhop']
    #elist['FastCyclic'][1] += [ ['seq', 1, 2, 3, 4, 5] ]
    ###########################################
    #elist['Consensus'] = [['2']]
    #elist['Consensus'][0] += ['unsync']
Example #19
csv = pd.DataFrame(columns=['dataset', 'bins', 'f1', 'zero-one'])

for j in range(2):
    for i in range(25):
        exp = ((i+1)/2)
        bins = math.ceil(2**exp)
        results = []
        for k in range(trials):
            dp = DataProcessor.DataProcessor(bin_count=bins)
            binnedDataset = dp.StartProcess(datasets[j])
            N, Q, F, testData = train(binnedDataset)

            model = Classifier.Classifier(N, Q, F)
            classifiedData = model.classify(testData)

            stats = Results.Results()
            zeroOne = stats.ZeroOneLoss(classifiedData)
            macroF1Average = stats.statsSummary(classifiedData)
            csv = csv.append({
                'dataset': dataset_names[j], 
                'bins': bins, 
                'f1': macroF1Average, 
                'zero-one':zeroOne/100
                }, ignore_index=True)
            trial = {"zeroOne": zeroOne, "F1": macroF1Average}
            results.append(trial)
            print(trial)

        z1 = 0
        f1 = 0
        for n in results: