def log_results(results):
    """Summarize cross-validation results per tree count and report the best.

    ``results`` maps n_tree -> {'folds': {fold -> {metric -> value}}}.
    For every tree count this computes, per metric, the average, standard
    deviation, 75th/90th/99th percentiles and median across folds and
    stores them back into ``results[n_tree]`` under keys such as
    ``'acc_avg'``, ``'acc_sd'``, ``'acc_perc75'`` … (mutates in place).
    Finally prints, per metric, the tree count with the highest median.
    """
    metrics = ('acc', 'train_time', 'pred_time', 'fMeasure_micro', 'fMeasure_macro')
    # Per metric: best (highest-median) tree count found so far.
    best_n_tree = {metric: None for metric in metrics}
    print('\n---------------------')
    for n_tree in results.keys():
        folds = results[n_tree]['folds']
        # Gather each metric's values across all folds for this tree count.
        allValues = {metric: [folds[fold][metric] for fold in folds]
                     for metric in metrics}
        for metric in metrics:
            values = allValues[metric]
            average = Analyzer.calcAverage(values)
            standarDeviation = Analyzer.calcStandarDeviation(values)
            percentile_75 = Analyzer.calcPercentile(values, 75)
            percentile_90 = Analyzer.calcPercentile(values, 90)
            percentile_99 = Analyzer.calcPercentile(values, 99)
            median = Analyzer.calcMedian(values)
            results[n_tree][metric + '_avg'] = average
            results[n_tree][metric + '_sd'] = standarDeviation
            results[n_tree][metric + '_perc75'] = percentile_75
            results[n_tree][metric + '_perc90'] = percentile_90
            results[n_tree][metric + '_perc99'] = percentile_99
            results[n_tree][metric + '_median'] = median
            # Track the highest median per metric ('is None' instead of the
            # original '== None'; '>=' means ties go to the later n_tree).
            if best_n_tree[metric] is None or median >= best_n_tree[metric]['median']:
                best_n_tree[metric] = {'tree': n_tree, 'median': median}
    # Iterate the fixed metric list: the original iterated `allValues`,
    # a variable leaked from the loop above, which is undefined (NameError)
    # when `results` is empty. The None-guard keeps empty input safe too.
    for metric in metrics:
        if best_n_tree[metric] is not None:
            print('Best number of trees: %d with %.2f %s'
                  % (best_n_tree[metric]['tree'], best_n_tree[metric]['median'], metric))
    print('---------------------\n')
def get_test_results_newer(clf, test_data, target_column, train_time):
    """Evaluate *clf* row-by-row on *test_data* and return a metrics dict.

    The returned dict holds accuracy, micro/macro F1 (via the project's
    Analyzer), the provided ``train_time``, and the mean per-row
    prediction wall time (CPU process time).
    """
    analyzer = Analyzer(list(test_data[target_column].unique()))
    feature_frame = test_data.drop(target_column, axis=1)
    expected_values = test_data[target_column]

    total_pred_time = 0.0
    # iterrows yields (index, row); unpack and time only the predict call.
    for (_, row), expected in zip(feature_frame.iterrows(), expected_values):
        started = time.process_time()
        predicted = clf.predict(row)
        total_pred_time += time.process_time() - started
        analyzer.addValueInConfusionMatrix(predicted, expected)

    return {
        'acc': analyzer.calcAccuracy(),
        'fMeasure_micro': analyzer.calcFBethaMeasure(1, "micro"),
        'fMeasure_macro': analyzer.calcFBethaMeasure(1, "macro"),
        'train_time': train_time,
        'pred_time': total_pred_time / len(test_data),
    }
# Interferometry analysis of the sun data set.
# NOTE(review): `matplotlib`, `plt` and `np` are used below but imported
# outside this chunk — confirm the file's top-of-file imports provide them.
from lsq import fit_components
from utils import FourierFilter, Analyzer

font = dict(
    size = 14
)
matplotlib.rc('font', **font)

# Baseline length [m] and observing wavelength [m] (c / 10.67 GHz).
B = 10.06
wl = 3e8 / 10.67e9

datafile = "../data/sun-4_3_2014-22.npz"
logfile = "../data/logs/sun-4_3_2014-22-log"

# 1-second sampling (dt=1.0).
sun = Analyzer(datafile, logfile, dt=1.0)
# First we will remove that raised section at the end (starts at index 26000)
sun.slice(0, 26000)
# Next we set invalid points (from telescope homing) to the avg_dc
sun.flatten_invalid_points()

# Top panel: raw power vs hour angle.
plt.subplot(211)
plt.plot(sun["ha"], sun["volts"])
plt.xlabel(r"Hour angle [h]", fontsize=18)
plt.ylabel(r"Power", fontsize=18)

# Bottom panel: power spectrum. Sample spacing is expressed as the Earth's
# rotation per sample, 2*pi/86164 rad (86164 s = one sidereal day), so the
# resulting frequencies are presumably fringe frequencies per radian of
# hour angle — TODO confirm against the later plotting/fit code.
plt.subplot(212)
trans = np.fft.fft(sun["volts"])
freqs = np.fft.fftfreq(len(trans), 2. * np.pi * 1.0 / 86164.)
# Interferometry analysis of the moon data set.
# NOTE(review): `np` is used below but not imported in this chunk —
# confirm the file imports numpy earlier.
import matplotlib.pyplot as plt
from fringe_funcs import fringe_freq, bessel
from lsq import fit_components
from utils import FourierFilter, Analyzer

# Baseline length [m] and observing wavelength [m] (c / 10.67 GHz).
B = 10.06
wl = 3e8 / 10.67e9

datafile = "../data/moon-4_6_2014-24.npz"
logfile = "../data/logs/moon-4_6_2014-24-log"

# 1-second sampling; skip everything before the given timestamp.
moon = Analyzer(datafile, logfile, dt=1.0, start_at_timestamp="2014-04-06 20:33:14,728")

import ephem
import datetime

# Compute the moon's apparent RA/dec for every sample with pyephem,
# stepping the observer's clock forward one sample at a time.
damoon = ephem.Moon()
obs = ephem.Observer()
# Observer latitude/longitude in radians (hard-coded site coordinates).
obs.lat = np.deg2rad(37.8732)
obs.long = np.deg2rad(-122.2573)
obs.date = ephem.date("2014-04-06 20:12:45")
damoon.compute(obs)
for i, lst in enumerate(moon["lst"]):
    # RA converted from radians to hours; dec from radians to degrees.
    moon["ra"][i] = 24. * damoon.ra / (2. * np.pi)
    moon["dec"][i] = np.rad2deg(damoon.dec)
    # NOTE(review): ephem dates count in days, so +1/86164 advances the
    # clock by one *sidereal*-scaled second (~1.0027 s) per 1 s sample —
    # confirm this is intentional rather than 1/86400.
    obs.date += 1. / 86164.
    damoon.compute(obs)
# Interferometry analysis of the 3C144 (Crab nebula) data set.
# NOTE(review): `matplotlib`, `plt` and `np` are used below but imported
# outside this chunk — confirm the file's top-of-file imports provide them.
from objects import OBJECTS
from utils import FourierFilter, Analyzer

font = dict(
    size = 14
)
matplotlib.rc('font', **font)

# Baseline length [m] and observing wavelength [m] (c / 10.67 GHz).
B = 10.0
wl = 3e8 / 10.67e9

datafile = "../data/3C144-4_3_2014-23.npz"
logfile = "../data/logs/3C144-4_3_2014-23-log"

# 1-second sampling; source RA converted from radians to hours.
crab = Analyzer(datafile, logfile, dt=1.0, ra=24. * OBJECTS["3C144"]["ra"] / (2. * np.pi))

# Top panel: raw power vs hour angle.
plt.subplot(211)
plt.plot(crab["ha"], crab["volts"])
plt.xlabel(r"Hour angle [h]", fontsize=18)
plt.ylabel(r"Power", fontsize=18)

# Bottom panel: power spectrum vs fringe frequency. Sample spacing is the
# Earth's rotation per sample, 2*pi/86164 rad (86164 s = sidereal day),
# giving frequencies in inverse radians as labeled below.
plt.subplot(212)
trans = np.fft.fft(crab["volts"])
freqs = np.fft.fftfreq(len(trans), 2. * np.pi * 1.0 / 86164.)
plt.plot(np.fft.fftshift(freqs), np.fft.fftshift(abs(trans)**2))
plt.xlabel(r"Frequency [rad$^{-1}$]", fontsize=18)
plt.ylabel(r"Power", fontsize=18)

# First we set invalid points (from telescope homing) to the avg_dc
# NOTE(review): unlike the sun script, this runs AFTER the FFT/plots above,
# so the plotted data still contains the invalid points — confirm ordering.
crab.flatten_invalid_points()