def segment_heuristically(self, sensors, reference_signal, label=None):
    """Segment *sensors* at peaks detected heuristically in *reference_signal*.

    Peaks are located with PeakAnalysis and converted to timestamps via the
    first sensor's timestamp array.  When *label* is given, each labelled
    timestamp is matched to its nearest heuristic peak timestamp and the
    matched pairs are passed to self.segment(); otherwise the raw peak
    timestamps are used.

    :param sensors: sequence of sensor objects; sensors[0].timestamp indexes
        peak positions to timestamps
    :param reference_signal: 1-D signal searched for peaks
    :param label: optional object with parallel .timestamp and .label arrays
    """
    p = PeakAnalysis(self.view)
    peaks = p.get_peaks(reference_signal)
    # Translate peak indices into timestamps on the first sensor.
    timestamps = [sensors[0].timestamp[pos] for pos in peaks]
    if label:
        label_timestamps = label.timestamp
        labels = label.label
        closer_labels = []
        closer_timestamps = []
        for i in range(len(label_timestamps)):
            # Start the search with the full labelled time span as the
            # "no match yet" distance.
            min_dist = label_timestamps[-1] - label_timestamps[0]
            closer_label = None
            closer_timestamp = None
            for heuristic_timestamp in timestamps:
                # abs() replaces the original sqrt(np.square(...)), which
                # computed the same absolute difference the long way round.
                dist = abs(label_timestamps[i] - heuristic_timestamp)
                if dist < min_dist:
                    min_dist = dist
                    closer_label = labels[i]
                    closer_timestamp = heuristic_timestamp
            # NOTE: if no peak beats the initial span, None is appended —
            # preserved from the original behaviour.
            closer_labels.append(closer_label)
            closer_timestamps.append(closer_timestamp)
        self.segment(sensors, closer_timestamps, closer_labels)
    else:
        self.segment(sensors, timestamps)
def segment_heuristically(self, sensors, reference_signal, label=None):
    """Segment *sensors* at peaks detected heuristically in *reference_signal*.

    Peak indices from PeakAnalysis are mapped to timestamps via
    sensors[0].timestamp.  With *label* supplied, every labelled timestamp is
    paired with the closest heuristic peak timestamp before delegating to
    self.segment(); without it, the peak timestamps are used directly.

    :param sensors: sequence of sensor objects (sensors[0].timestamp is used)
    :param reference_signal: 1-D signal searched for peaks
    :param label: optional object with parallel .timestamp and .label arrays
    """
    analyzer = PeakAnalysis(self.view)
    peak_positions = analyzer.get_peaks(reference_signal)
    # Convert peak indices to timestamps on the first sensor.
    timestamps = [sensors[0].timestamp[pos] for pos in peak_positions]
    if label:
        label_timestamps = label.timestamp
        labels = label.label
        closer_labels = []
        closer_timestamps = []
        for i in range(len(label_timestamps)):
            # Initial threshold: the whole labelled time span.
            min_dist = label_timestamps[-1] - label_timestamps[0]
            closer_label = None
            closer_timestamp = None
            for heuristic_timestamp in timestamps:
                # abs() is the direct form of the original
                # sqrt(np.square(...)) absolute-difference computation.
                dist = abs(label_timestamps[i] - heuristic_timestamp)
                if dist < min_dist:
                    min_dist = dist
                    closer_label = labels[i]
                    closer_timestamp = heuristic_timestamp
            # None is kept when no peak was close enough — original behaviour.
            closer_labels.append(closer_label)
            closer_timestamps.append(closer_timestamp)
        self.segment(sensors, closer_timestamps, closer_labels)
    else:
        self.segment(sensors, timestamps)
def get_statistical_features(self, data):
    """Return a feature vector of summary statistics for *data*.

    :param data: 1-D numeric signal
    :returns: list in the order [min, max, RMS, peak count, crest factor,
        skewness, kurtosis, variance]
    """
    p = PeakAnalysis(self.view)
    min_value = np.amin(data)
    max_value = np.amax(data)
    root_mean_square = UMath.get_root_mean_square(data)
    # len() is already a scalar; the original np.mean(len(...)) was a no-op
    # that only converted the int to np.float64.
    peaks_number = len(p.get_peaks(data))
    crest_factor = np.median(p.get_peak_to_average_ratios(data))
    # NOTE(review): the second positional argument of stats.skew/kurtosis is
    # *axis*, not *bias*; False is read as axis=0. If bias=False was the
    # intent, pass it by keyword. Left as-is to preserve behaviour.
    skewness = stats.skew(data, False)
    kurtosis = stats.kurtosis(data, False)
    variance = np.var(data)
    return [min_value, max_value, root_mean_square, peaks_number,
            crest_factor, skewness, kurtosis, variance]
def analyzePlot(self,plotindex=-1): x,y=self.plot.plot.lines[plotindex].get_data() pylab.ion() fh=pylab.figure() pylab.plot(x,y,'ko-') CEN,FWHM,PEAK =PeakAnalysis.PeakAnalysis(x,y,nb=3,plotpoints=True) print 'Center=%e; FWHM=%e, PEAK=%e' %(CEN,FWHM,PEAK)
def fitgauss(x, y, e, a_init=None, x0_init=None, sigma_init=None, c_init=None):
    """Fit a Gaussian-plus-constant model to (x, y) with errors e via Minuit2.

    Initial guesses not supplied by the caller are derived from a peak
    analysis of the data.  Returns (fit_par, fit_err, x, fit) where fit is
    the best-fit model evaluated on x.
    """
    n_bkg = 3  # samples taken from each end of y to estimate the background
    # chi2gauss (defined elsewhere) reads the data through these module
    # globals rather than taking them as arguments.
    g = globals()
    g["datax"] = x
    g["datay"] = y
    g["datae"] = e
    # Seed the fit with center / FWHM / peak estimates from the raw data.
    (x0_guess, fwhm_guess, peak_guess) = PeakAnalysis.PeakAnalysis(x, y, nb=n_bkg)
    if (x0_init is None):
        x0_init = x0_guess
    if (sigma_init is None):
        sigma_init = fwhm_guess / 2.35  # FWHM -> sigma for a Gaussian (~2.355)
    if (c_init is None):
        # Constant background: mean of the leading and trailing n_bkg samples.
        # NOTE(review): y[-1 - n_bkg:-1] excludes the very last sample —
        # confirm whether y[-n_bkg:] was intended.
        c_init = (y[0:n_bkg].mean() + y[-1 - n_bkg:-1].mean()) / 2.
    if (a_init is None):
        a_init = (y - c_init).max()  # amplitude above the background estimate
    print x0_init, sigma_init, c_init, a_init
    m = minuit2.Minuit2(chi2gauss, a=a_init, x0=x0_init, sigma=sigma_init, c=c_init)
    m.printMode = 1  # verbose iteration output from Minuit
    m.migrad()  # run the MIGRAD minimizer
    fit_par = m.values
    fit_err = m.errors
    # Evaluate the fitted model on the input grid for convenience.
    fit = fit_par["a"] * gauss(x, fit_par["x0"], fit_par["sigma"]) + fit_par["c"]
    return (fit_par, fit_err, x, fit)
def get_statistical_features(self, data):
    """Compute summary statistics describing *data*.

    :param data: 1-D numeric signal
    :returns: list in the order [min, max, RMS, peak count, crest factor,
        skewness, kurtosis, variance]
    """
    p = PeakAnalysis(self.view)
    min_value = np.amin(data)
    max_value = np.amax(data)
    root_mean_square = UMath.get_root_mean_square(data)
    # The original wrapped len(...) in np.mean(), a no-op on a scalar that
    # merely converted the int to np.float64.
    peaks_number = len(p.get_peaks(data))
    crest_factor = np.median(p.get_peak_to_average_ratios(data))
    # NOTE(review): stats.skew/kurtosis take *axis* as their second
    # positional argument; False is interpreted as axis=0, not bias=False.
    # Left unchanged to preserve behaviour.
    skewness = stats.skew(data, False)
    kurtosis = stats.kurtosis(data, False)
    variance = np.var(data)
    return [
        min_value, max_value, root_mean_square, peaks_number,
        crest_factor, skewness, kurtosis, variance
    ]
if pos == smaller: new_data.append(data[smaller]) else: # smaller == pos 인 경우 smaller == larger 이니까 이렇게 else를 larger = smaller + 1 으로 larger = smaller + 1 slope = data[larger] - data[smaller] new_data.append(data[smaller] + slope * (pos - smaller)) new_data.append(data[peaks[-1]]) return new_data if __name__ == '__main__': inflation, unemployment = load_data() # why not use mark real peaks, remove ambiguous peaks? mod_inf, peak_inf = PeakAnalysis.find_peak(inflation, 0.6) mod_une, peak_une = PeakAnalysis.find_peak(unemployment, 0.7) matched = match_peaks(mod_inf, peak_inf, mod_une, peak_une) interpolated_inf = interpolate_peaks(mod_inf, list(map(lambda t: t[0], matched))) interpolated_une = interpolate_peaks(mod_une, list(map(lambda t: t[1], matched))) assert (len(interpolated_inf) == len(interpolated_une)) # with EXPRESSION as NAME: with open('interpolated_data.csv', 'w') as f: for idx in range(len(interpolated_inf)): f.write(