def cal_k_tran(data, input_k, input_bin):
    """Detect change points on every test file of *data* with a
    Kalman-filtered Chebyshev detector, and store them back on *data*.

    Each raw measurement is passed through a Kalman update/predict step
    and the filtered estimate is fed to the detector; indices where the
    detector fires are recorded per file via ``data.set_change_points``.

    NOTE(review): ``input_bin`` is accepted but unused in this variant —
    confirm whether binning was intended here.

    Returns the same *data* object.
    """
    model_mean = 0
    model_variance = 0.01
    data.clear_change_points()
    detector = chebyshev_base(min_size=60, max_window=60, k=input_k)
    for file_idx, _file_name in enumerate(data.get_files_test()):
        detected = []
        # Initial Kalman state estimate and (deliberately huge) variance.
        est, est_var = 10, 10000
        for pos, obs in enumerate(data.get_dataset_test(file_idx)):
            est, est_var = kalman.update(est, est_var, obs, model_variance)
            est, est_var = kalman.predict(est, est_var, model_mean, model_variance)
            if detector.add_element(est):
                detected.append(pos)
        data.set_change_points(detected)
    return data
def get_changepoint_BinCheb(self, instances, is_normal=False):
    """Bin *instances* into fixed-size bins and run Chebyshev change
    detection on the bin means.

    Parameters
    ----------
    instances : iterable of raw values.
    is_normal : when True, a detection marks the whole preceding bin
        (``Bin_size`` indices) instead of just the current index.

    Returns
    -------
    (change_points, mean_list) — detected indices and the sequence of
    bin means fed to the detector.  Side effects: stores the detector on
    ``self.cheb`` and its exception list on ``self.ex``.
    """
    if self.is_k:
        detector = chebyshev_base(min_size=self.min_winsize, max_window=self.max_winsize, k=self.K)
    else:
        detector = chebyshev_base_e(min_size=self.min_winsize, max_window=self.max_winsize, e=self.E)
    current_bin = binning(bin_period=self.Bin_size)
    change_points = []
    temp_bin = []
    mean_list = []
    for pos, value in enumerate(instances):
        fired = False
        if not current_bin.can_add_bin():
            # Bin is full: record it, detect on its mean, start fresh.
            temp_bin.append(current_bin.get_bin())
            bin_mean = mean(current_bin.get_bin())
            mean_list.append(bin_mean)
            fired = detector.add_element(bin_mean)
            current_bin = binning(bin_period=self.Bin_size)
        current_bin.add_instance(value)
        if fired:
            if is_normal:
                change_points.extend(range(pos - self.Bin_size, pos))
            else:
                change_points.append(pos)
    self.cheb = detector
    self.ex = detector.get_exception()
    return change_points, mean_list
def cal_k_tran(data, input_k, input_bin):
    """Detect change points on every test file of *data* using binned
    Chebyshev detection, and store them back on *data*.

    Parameters
    ----------
    data : data_stream-like object exposing clear_change_points(),
        get_files_test(), get_dataset_test(i) and set_change_points().
    input_k : Chebyshev sensitivity factor ``k``.
    input_bin : bin size (number of instances averaged per bin).

    Returns the same *data* object.
    """
    data.clear_change_points()
    detector = chebyshev_base(min_size=60, max_window=60, k=input_k)
    stream_bin = binning(bin_period=input_bin)
    for file_idx, _file_name in enumerate(data.get_files_test()):
        change_points = []
        for index, instance in enumerate(data.get_dataset_test(file_idx)):
            is_change = False
            if stream_bin.can_add_bin():
                stream_bin.add_instance(instance)
            else:
                # Bin full: test the bin mean, then start a fresh bin.
                is_change = detector.add_element(mean(stream_bin.get_bin()))
                stream_bin.reset_bin()
                # NOTE(review): the current instance is dropped here (not
                # added to the fresh bin), unlike the other Bin+Cheb
                # variants in this file — confirm whether that is intended.
            if is_change:
                # Mark the whole bin that triggered the detection.
                # FIX: was `bin_period`, which is not defined in this
                # function (at best an unrelated module global); the bin
                # size used here is the `input_bin` argument.
                for j in range(index - input_bin + 1, index + 1):
                    change_points.append(j)
        data.set_change_points(change_points)
    return data
def get_changepoint_baseCheb(self, instances):
    """Run the plain (unbinned, unfiltered) Chebyshev detector over
    *instances*.

    Returns
    -------
    (change_points, std_low_list, std_high_list, mean_list) — the
    detected indices plus the detector's running low/high std bounds
    and means.
    """
    detector = chebyshev_base(min_size=self.min_winsize, max_window=self.max_winsize, k=self.K)
    # add_element is stateful; the comprehension preserves feed order.
    change_points = [pos for pos, value in enumerate(instances)
                     if detector.add_element(value)]
    return (change_points,
            detector.get_std_low_list(),
            detector.get_std_high_list(),
            detector.get_mean_list())
def get_changepoint_kalmanBinCheb(self, instances, inpput_x=10, input_var=10000, model_mean=0, model_variance=0.01):
    """Kalman-filter *instances*, average the filtered estimates into
    fixed-size bins, and run Chebyshev change detection on the bin means.

    Parameters
    ----------
    instances : iterable of raw measurements.
    inpput_x : initial Kalman state estimate (misspelled name kept for
        caller compatibility).
    input_var : initial Kalman state variance.
    model_mean, model_variance : Kalman process-model parameters.

    Returns
    -------
    list of detected indices; each detection marks the whole bin
    (``Bin_size`` consecutive indices) that triggered it.
    """
    cheb = chebyshev_base(min_size=self.min_winsize, max_window=self.max_winsize, k=self.K)
    stream_bin = binning(bin_period=self.Bin_size)
    change_points = []
    x = inpput_x
    x_var = input_var
    for index, measurement in enumerate(instances):
        x, x_var = kalman.update(x, x_var, measurement, model_variance)
        x, x_var = kalman.predict(x, x_var, model_mean, model_variance)
        is_change = False
        if not stream_bin.can_add_bin():
            # Bin full: detect on the bin mean, then start over.
            is_change = cheb.add_element(mean(stream_bin.get_bin()))
            stream_bin.reset_bin()
        stream_bin.add_instance(x)
        if is_change:
            # FIX: was `self.bin_period`, an attribute never set anywhere
            # visible; the bin size used everywhere else (and to build the
            # binning above) is `self.Bin_size`, so the old code raised
            # AttributeError on the first detection.
            for j in range(index - self.Bin_size + 1, index + 1):
                change_points.append(j)
    return change_points
def get_changepoint_kalmanCheb(self, measurements, inpput_x=10, input_var=10000, model_mean=0, model_variance=0.01):
    """Kalman-filter *measurements* and feed every filtered estimate
    straight into the Chebyshev detector (no binning).

    Parameters
    ----------
    measurements : iterable of raw measurements.
    inpput_x : initial Kalman state estimate (misspelled name kept for
        caller compatibility).
    input_var : initial Kalman state variance.
    model_mean, model_variance : Kalman process-model parameters.

    Returns
    -------
    (change_points, std_low_list, std_high_list, mean_list) — detected
    indices plus the detector's running low/high std bounds and means.
    """
    detector = chebyshev_base(min_size=self.min_winsize, max_window=self.max_winsize, k=self.K)
    detected = []
    estimate, variance = inpput_x, input_var
    for pos, observation in enumerate(measurements):
        estimate, variance = kalman.update(estimate, variance, observation, model_variance)
        estimate, variance = kalman.predict(estimate, variance, model_mean, model_variance)
        if detector.add_element(estimate):
            detected.append(pos)
    return (detected,
            detector.get_std_low_list(),
            detector.get_std_high_list(),
            detector.get_mean_list())
def cal_k_tran(data, input_k, input_bin):
    """Kalman-filter each test dataset of *data*, bin the filtered
    estimates, run Chebyshev detection on the bin means, and store the
    resulting change points back on *data*.

    Parameters
    ----------
    data : data_stream-like object.
    input_k : Chebyshev sensitivity factor ``k``.
    input_bin : bin size (number of filtered estimates per bin).

    Returns the same *data* object.
    """
    x_start = 10          # initial Kalman state estimate
    x_var_start = 10000   # initial (deliberately huge) state variance
    model_mean = 0
    model_variance = 0.01
    data.clear_change_points()
    cheb = chebyshev_base(min_size=60, max_window=60, k=input_k)
    stream_bin = binning(bin_period=input_bin)
    for i, file_name in enumerate(data.get_files_test()):
        change_points = []
        measurements = data.get_dataset_test(i)
        # Per-measurement filter traces (estimate, variance).
        xs = np.zeros((len(measurements), 2))
        ps = np.zeros((len(measurements), 2))
        x = x_start
        x_var = x_var_start
        for index, measurement in enumerate(measurements):
            x, x_var = kalman.update(x, x_var, measurement, model_variance)
            # FIX: traces were written at xs[i]/ps[i] (the FILE index),
            # so each file overwrote a single row; index by measurement
            # position instead.
            xs[index] = x, x_var
            x, x_var = kalman.predict(x, x_var, model_mean, model_variance)
            ps[index] = x, x_var
            is_change = False
            if not stream_bin.can_add_bin():
                # Bin full: detect on the bin mean, then start over.
                is_change = cheb.add_element(mean(stream_bin.get_bin()))
                stream_bin.reset_bin()
            stream_bin.add_instance(x)
            if is_change:
                # FIX: was `bin_period`, which is not defined in this
                # function (at best an unrelated module global); the bin
                # size here is the `input_bin` argument.
                for j in range(index - input_bin + 1, index + 1):
                    change_points.append(j)
        data.set_change_points(change_points)
    return data
# Script fragment: sweep pattern lengths (LS) and intervals (IS) over the
# lightcurve benchmark, Kalman-filter each test file's measurements and
# (further down, cut off in this view) feed estimates to the detector.
# NOTE(review): this fragment is truncated — the inner measurement loop
# continues past the end of the visible source.
for L in LS:
    for I in IS:
        # data = data_stream(path='C:\\Users\\karnk\\git\\data_stream\\dataset', type=type, pattern=pattern, len=L,
        #                    interval=I)
        data = data_stream(
            path='D:\\git_project\\data stream\\lightcurve_benchmark\\{}'.format(data_type),
            type=type, pattern=pattern, len=L, interval=I)
        data.load_data_fromfile()
        # cheb — fixed 60-sample window; `k` comes from the enclosing scope
        cheb = chebyshev_base(min_size=60, max_window=60, k=k)
        #Kalman
        for i, file_name in enumerate(data.get_files_test()):
            change_points = []
            # file_name = data.get_file_name(i)
            measurements = data.get_dataset_test(i)
            # Per-measurement (estimate, variance) trace buffers.
            xs = np.zeros((len(measurements), 2))
            ps = np.zeros((len(measurements), 2))
            x = x_start          # x_start / x_var_start from enclosing scope
            x_var = x_var_start
            for index, measurement in enumerate(measurements):
                x, x_var = kalman.update(
                    x, x_var, measurement, model_variance)
                # NOTE(review): xs is indexed by file number `i`, not by
                # `index` — every file overwrites one row; confirm intent
                # (the per-measurement trace would use xs[index]).
                xs[i] = x, x_var
# Script fragment: sweep dataset type / pattern / length / interval
# combinations and Kalman-filter each test file's measurements.
# NOTE(review): truncated — the inner measurement loop continues past the
# end of the visible source.
for type in types:                # NOTE: `type` shadows the builtin
    for pattern in patterns:
        for L in LS:
            for I in IS:
                # data = data_stream(path='C:\\Users\\karnk\\git\\data_stream\\dataset', type=type, pattern=pattern, len=L,
                #                    interval=I)
                data = data_stream(
                    path='D:\\git_project\\data stream\\dataset',
                    type=type, pattern=pattern, len=L, interval=I)
                data.load_data_fromfile()
                # cheb — no min_size here, unlike the other sweeps; confirm
                cheb = chebyshev_base(max_window=500, k=3)
                change_points = []
                #Kalman
                for i in range(data.get_file_lenght()):
                    file_name = data.get_file_name(i)
                    measurements = data.get_dataset_test(i)
                    # Per-measurement (estimate, variance) trace buffers.
                    xs = np.zeros((len(measurements), 2))
                    ps = np.zeros((len(measurements), 2))
                    x = x_start          # from enclosing scope
                    x_var = x_var_start  # from enclosing scope
                    for index, measurement in enumerate(measurements):
                        x, x_var = kalman.update(x, x_var, measurement, model_variance)
                        # NOTE(review): indexed by file number `i`, not by
                        # `index` — every file overwrites one row; confirm.
                        xs[i] = x, x_var
# Script fragment: sweep the "hinet"/"si" dataset combinations and run
# binned Chebyshev change detection (no Kalman filtering in this variant).
# NOTE(review): truncated — the detection/bookkeeping code after
# bin.reset_bin() continues past the end of the visible source.
types = ["hinet"]
patterns = ["si"]
LS = [1,5]          # pattern lengths to sweep
IS = [100,1000]     # intervals to sweep
for type in types:  # NOTE: `type` shadows the builtin
    for pattern in patterns:
        for L in LS:
            for I in IS:
                # data = data_stream(path='C:\\Users\\karnk\\git\\data_stream\\dataset', type=type, pattern=pattern, len=L,
                #                    interval=I)
                data = data_stream(path='D:\\git_project\\data stream\\dataset', type=type, pattern=pattern, len=L, interval=I)
                data.load_data_fromfile()
                # binning process — detector shared across all files
                cheb = chebyshev_base(min_size=500,k=3)
                change_points = []
                for i in range(data.get_file_lenght()):
                    file_name = data.get_file_name(i)
                    instances = data.get_dataset_test(i)
                    # `bin_period` comes from enclosing scope; `bin`
                    # shadows the builtin.
                    bin = binning(bin_period=bin_period)
                    for index, instance in enumerate(instances):
                        is_change = False
                        can_add_bin = bin.can_add_bin()
                        if can_add_bin:
                            bin.add_instance(instance)
                        else:
                            # Bin full: detect on the bin mean, reset.
                            mean_bin = mean(bin.get_bin())
                            is_change = cheb.add_element(mean_bin)
                            bin.reset_bin()