def test_save_and_initialise_from_results(example_detector, tmpdir):
    """Round-trip an EvaluationTool through disk and check key state survives."""
    detector = example_detector
    detector.run()

    tool = EvaluationTool()
    tool.build_EvaluationTool_via_run_detector(detector)

    # Persist results and confirm the file was written.
    results_path = tmpdir.join("test_save_evt.txt")
    tool.store_results_to_HD(results_path)
    assert os.path.isfile(results_path)

    # Rebuild a fresh tool from the stored file.
    restored = EvaluationTool()
    restored.build_EvaluationTool_via_results(results_path)

    # Key quantities must match the in-memory original.
    assert restored.type == 4  # type 4 == detector has already been run
    assert restored.results[1] == tool.results[1]  # execution time
    # Run-length log distributions, compared element-wise.
    for loaded_rlld, original_rlld in zip(restored.results[8], tool.results[8]):
        assert loaded_rlld == original_rlld
"""Fill in and obtain normal and demeaned versions""" temperatures_spatial[selection, location] = ( station_means[location] + month_means[location, month] + year_effect_controlling_for_months) temperatures_spatial_demeaned[selection2, location] = ( temperatures_spatial[selection2, location] - station_means[location] - month_means[location, month]) """Normalize""" temperatures_spatial_demeaned = (temperatures_spatial_demeaned - np.mean( temperatures_spatial_demeaned, axis=0)) / np.sqrt( np.var(temperatures_spatial_demeaned, axis=0)) """STEP 3: Read in the results""" """Read in results""" EvT = EvaluationTool() EvT.build_EvaluationTool_via_results(results_file) """STEP 4: Get your plots""" segmentation = EvT.results[EvT.names.index("MAP CPs")][-2] model_labels = EvT.results[EvT.names.index("model labels")] num_models = len(np.union1d(model_labels, model_labels)) relevant_models = np.union1d([seg[1] for seg in segmentation], [seg[1] for seg in segmentation]) mods = [8, 11, 13, 17, 18] all_models = np.linspace(0, len(model_labels) - 1, len(model_labels), dtype=int) """"STEP 5: Get annotations""" #http://file.scirp.org/pdf/ACS_2013062615184222.pdf #https://en.wikipedia.org/wiki/History_of_climate_change_science #https://en.wikipedia.org/wiki/Climate_change_in_Europe
# NOTE(review): `csvfile` comes from an enclosing `with open(...)` outside this
# excerpt, and `raw_data`, `plot1`, `result_path`, `results_file_KL`,
# `results_file_DPD` are defined earlier in the full script — verify there.

# Accumulate every CSV cell into one flat list.
reader = csv.reader(csvfile)
for row in reader:
    raw_data += row

# Convert all entries from string to float.
raw_data_float = []
for entry in raw_data:
    raw_data_float.append(float(entry))
raw_data = raw_data_float

# T = number of observations; S1, S2 = spatial grid dimensions (1x1 here,
# i.e. a univariate series).
T = int(len(raw_data))
S1, S2 = 1, 1  #S1, S2 give you spatial dimensions
data = np.array(raw_data).reshape(T, 1, 1)

"""STEP 2: Read in the data to create your EvT objects"""
# One evaluation tool per stored result set (KL-based and DPD-based runs).
EvTKL = EvaluationTool()
EvTKL.build_EvaluationTool_via_results(result_path + "//" + results_file_KL)
EvTDPD = EvaluationTool()
EvTDPD.build_EvaluationTool_via_results(result_path + "//" + results_file_DPD)

if plot1:
    """STEP 3: Set up the figure properties and plot"""
    # Relative row heights for the 2x2 panel grid.
    height_ratio = [8, 10]  #10,10
    custom_colors = ["blue", "purple"]
    # 2x2 grid; columns share x-axes and rows share y-axes.
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(
        2, 2, figsize=(12, 5),
        sharex='col', sharey='row',  # True,
        gridspec_kw={'height_ratios': height_ratio})
    ax_arrays = np.array([np.array([ax1, ax2]), np.array([ax3, ax4])])
# NOTE(review): the first line below is the tail of an expression whose start
# (and the `if` matching the `else:` further down) lies outside this excerpt —
# presumably a uniform model prior such as
# `model_prior = np.array([1/M]*M)` with M = len(model_universe); confirm.
int(len(model_universe)))

    # Run the detector from scratch on the in-memory data.
    detector = Detector(
        data, model_universe, model_prior, cp_model,
        S1, S2, T,
        exo_data=None,  # no exogenous regressors
        num_exo_vars=None,
        threshold=200,
        store_rl=True,    # keep run-length distributions
        store_mrl=True)  #,
    detector.run()

    """build evaluation tool"""
    EvT = EvaluationTool()
    EvT.build_EvaluationTool_via_run_detector(detector)
else:
    """Directly read results"""
    # Skip the (expensive) detector run and load stored results instead.
    result_path = baseline_working_directory
    EvT = EvaluationTool()
    EvT.build_EvaluationTool_via_results(result_path + "//" +
                                         "results_demo.txt")

"""Plot in panels: Raw data (offsetting the mean), CPs + RLD
                   1-step-ahead-prediction + variance
                   model posterior"""
custom_colors_models = ['green', 'purple', 'orange', 'blue', 'darkgray']
custom_colors_series = ['black'] * 4
custom_linestyles = ['solid'] * 5

# Vertically offset each spatial series so they do not overlap in the plot.
offsets = np.linspace(1, S1 * S2, S1 * S2).reshape(S1, S2)
data_original = data.copy()
data = data + offsets * 2.5
data = data.reshape(T, S1 * S2)

"""Get some quantities needed for rest"""
# NOTE(review): `count` and `myl` are initialized before this excerpt
# (presumably count = 0, myl = []); confirm against the full script.
with open(baseline_working_directory + "//" + date_file) as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        #DEBUG: Unclear if this is needed
        # Skip the first row (header?), accumulate the rest.
        if count > 0:
            myl += row
        count += 1
        # Progress indicator for large files.
        if count % 2500 == 0:
            print(count)

# Dates are stored as integers in YYYYMMDD form.
dates = []
for e in myl:
    dates.append(int(e))

# Load the stored detector results into an evaluation tool.
result_path = baseline_working_directory
EvT = EvaluationTool()
EvT.build_EvaluationTool_via_results(result_path + "//" + results_file)

"""Using the dates, select your range and indices:
    select 03/07/1975 -- 31/12/2008, i.e. find indices that correspond"""
# NOTE(review): start_test uses 1974-07-03 although the note above says 1975 —
# presumably a deliberate warm-up/test year before the algorithm start; verify.
start_test = dates.index(19740703)
start_algo = dates.index(19750703)
stop = dates.index(20081231)

"""time period for which we want RLD"""
start_comparison = dates.index(20070801)  #dates.index(19980102)
stop_comparison = stop  #stop

# Convert the comparison window's integer dates into datetime.date objects.
all_dates = []
for d in dates[start_comparison:stop - 2]:
    s = str(d)
    all_dates.append(datetime.date(year=int(s[0:4]),
                                   month=int(s[4:6]),
                                   day=int(s[6:8])))
# NOTE(review): `station` is a loop variable from an enclosing loop outside
# this excerpt, and `mean_2h`, `T`, `normalize` are defined earlier — confirm.
# There are 12*7 = 84 two-hour slots per week (12 slots/day * 7 days).
for _2h in range(0, 12 * 7):
    # Boolean mask that is True exactly at weekly slot `_2h`.
    selection_2h = [False] * _2h + [
        True
    ] + [False] * (12 * 7 - 1 - _2h)
    # Tile the weekly mask over the full series; the second term covers the
    # leftover partial week at the end (T need not be a multiple of 84).
    selection = (selection_2h * int(T / (7 * 12)) +
                 selection_2h[:(T - int(T / (7 * 12)) * 7 * 12)])
    # Remove the per-slot mean, i.e. deseasonalize the weekly cycle.
    mean_2h[_2h, station] = np.mean(data[selection, station])
    data[selection, station] = (data[selection, station] -
                                mean_2h[_2h, station])

if normalize:
    # Standardize each station to zero mean and unit variance.
    data = (data - np.mean(data, axis=0)) / np.sqrt(np.var(data, axis=0))
intercept_priors = np.mean(data, axis=0)

"""STEP 2: READ RESULTS"""
# One evaluation tool per stored result set (KL-based and DPD-based runs).
EvTKL, EvTDPD = EvaluationTool(), EvaluationTool()
EvTKL.build_EvaluationTool_via_results(results_file_KL)
EvTDPD.build_EvaluationTool_via_results(results_file_DPD)

"""Get dates"""
def perdelta(start, end, delta, date_list):
    # Append to date_list every value from start (inclusive) up to end
    # (exclusive) in steps of delta; mutates date_list in place.
    curr = start
    while curr < end:
        #yield curr
        date_list.append(curr)
        curr += delta

all_dates = []
#start_year, start_month, start_day, start_hour = 2002, 8, 17, 0
#start_datetime = datetime.datetime(year = 2002, month = 8, day = 17, hour = 0)