for L in LS:
    for I in IS:
        # data = data_stream(path='C:\\Users\\karnk\\git\\data_stream\\dataset',
        #                    type=type, pattern=pattern, len=L, interval=I)
        data = data_stream(
            path='D:\\git_project\\data stream\\lightcurve_benchmark\\{}'.format(data_type),
            type=type, pattern=pattern, len=L, interval=I)
        data.load_data_fromfile()

        # binning process
        cheb = adwin(max_window=60, k=k)
        for i, file_name in enumerate(data.get_files_test()):
            change_points = []
            # file_name = data.get_file_name(i)
            instances = data.get_dataset_test(i)
            for index, instance in enumerate(instances):
                is_change = cheb.add_element(instance)
                if is_change:
                    change_points.append(index)
            # change_points = [1, 200, 300]
            data.set_change_points(change_points)
            # print(change_points)
            # result_name = '{}_W{}_L{}'.format(type, L, I)
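# NOTE (illustrative sketch, not the project's adwin class): the detector above is only
# assumed to expose add_element(value) -> bool, flagging a change when a new value falls
# outside a Chebyshev-style k-sigma band over a bounded sliding window, which is what the
# max_window and k parameters suggest. A minimal, hypothetical detector with that
# interface could look like this:
from collections import deque
import statistics


class chebyshev_detector:
    """Hypothetical sliding-window detector with an adwin-like add_element interface."""

    def __init__(self, max_window=60, k=3):
        self.window = deque(maxlen=max_window)  # bounded history of recent values
        self.k = k                              # Chebyshev bound: P(|X - mu| >= k*sigma) <= 1/k^2

    def add_element(self, value):
        is_change = False
        if len(self.window) >= 2:
            mu = statistics.mean(self.window)
            sigma = statistics.pstdev(self.window)
            # flag a change point when the new value leaves the k-sigma band
            if sigma > 0 and abs(value - mu) > self.k * sigma:
                is_change = True
                self.window.clear()  # restart the window after a detected change
        self.window.append(value)
        return is_change

# usage mirrors the loop above: det = chebyshev_detector(max_window=60, k=k); det.add_element(instance)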
IS = [5, 10, 30, 60]
process_var = 1
process_mean = 0

for type in types:
    for pattern in patterns:
        for L in LS:
            for I in IS:
                # data = data_stream(path='C:\\Users\\karnk\\git\\data_stream\\dataset',
                #                    type=type, pattern=pattern, len=L, interval=I)
                data = data_stream(
                    path='D:\\git_project\\data stream\\lightcurve_benchmark\\pca',
                    type=type, pattern=pattern, len=L, interval=I)
                data.load_data_fromfile()

                # cheb
                cheb = adwin(max_window=500, k=3)

                # Kalman
                for i, file_name in enumerate(data.get_files_test()):
                    change_points = []
                    # file_name = data.get_file_name(i)
                    measurements = data.get_dataset_test(i)
                    xs = np.zeros((len(measurements), 2))
                    ps = np.zeros((len(measurements), 2))
                    x = x_start
                    x_var = x_var_start
                    for index, measurement in enumerate(measurements):
                        x, x_var = kalman.update(x, x_var, measurement, model_variance)
                        xs[index] = x, x_var  # store the filtered state and its variance per time step
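# NOTE (hypothetical sketch): the kalman module called above is not shown here. For a 1-D
# state, kalman.update(x, x_var, measurement, model_variance) is assumed to perform the
# standard scalar Kalman measurement update, with the fourth argument playing the role of
# the measurement noise variance. Such an update could be written as:
def kalman_update(x, x_var, measurement, measurement_variance):
    """Scalar Kalman measurement update: fuse the prior (x, x_var) with one measurement."""
    kalman_gain = x_var / (x_var + measurement_variance)  # how much to trust the measurement
    x_new = x + kalman_gain * (measurement - x)           # corrected state estimate
    x_var_new = (1 - kalman_gain) * x_var                 # reduced posterior variance
    return x_new, x_var_new


def kalman_predict(x, x_var, process_mean=0.0, process_var=1.0):
    """Scalar Kalman prediction step: propagate the state with process noise,
    matching the process_mean / process_var constants defined above (assumption)."""
    return x + process_mean, x_var + process_var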