def detect_memory(self, start, end, incr):
    """
    Tests the number of tagged particles over a range of area fractions, and
    returns a list of area fractions where memories are detected.

    Args:
        start (float): The first area fraction in the detection
        end (float): The last area fraction in the detection
        incr (float): The increment between test swells. Determines accuracy of the memory detection.
    Returns:
        (np.array): list of swells where a memory is located
    """
    area_frac = np.arange(start, end, incr)
    curve = self.tag_curve(area_frac)
    zeros = np.zeros(curve.shape)
    pos = np.choose(curve < 0, [curve, zeros])
    neg = np.choose(curve > 0, [curve, zeros])
    indices = peak.indexes(pos, 0.5, incr)
    nindices = peak.indexes(-neg, 0.5, incr)
    matches = []
    for i in indices:
        for j in nindices:
            desc = True
            if (i < j):
                for k in range(i, j):
                    if (curve[k] < curve[k + 1]):
                        desc = False
                if (desc):
                    matches.append(i)
    return area_frac[matches]
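
Below is a self-contained sketch (not part of the original class) of the same positive-peak / negative-valley matching idea, run on a synthetic tag curve; numpy and peakutils are the only assumptions.

import numpy as np
from peakutils.peak import indexes

area_frac = np.arange(0.3, 0.8, 0.01)
# synthetic tag curve: a positive bump (tagging) followed by a negative bump (untagging)
curve = np.exp(-(area_frac - 0.45) ** 2 / 2e-4) - np.exp(-(area_frac - 0.55) ** 2 / 2e-4)

zeros = np.zeros(curve.shape)
pos = np.where(curve > 0, curve, zeros)    # positive part of the curve
neg = np.where(curve < 0, curve, zeros)    # negative part of the curve
print(area_frac[indexes(pos, 0.5, 1)])     # peak near an area fraction of 0.45
print(area_frac[indexes(-neg, 0.5, 1)])    # valley near an area fraction of 0.55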
Example 2
    def freq_hrv_ls(self, t, y):
        """Returns HRV (heart rate variability) in the frequency domain using a Lomb-Scargle periodogram.
        Note - it may contain noise.

        :param t: (array) input time stamps of phase values
        :param y: (array) input phase values
        :return: periodogram value amplitudes with corresponding frequencies
        """
        indexes_peaks = indexes(y, min_dist=self.samp_rate / 2)

        if len(indexes_peaks) < 2:
            return np.array([0]), np.array([0])

        peaklist = indexes_peaks
        RR_list = np.diff(t[indexes_peaks]) * 1000

        RR_x = peaklist[1:]
        RR_y = RR_list

        f = np.linspace(0.01, 0.5, 100)

        try:
            pgram = signal.lombscargle(RR_x, RR_y, f, normalize=True)
        except ValueError:
            return np.array([0]), np.array([0])

        return pgram, f
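
A standalone sketch of the Lomb-Scargle step above on a made-up RR series (the beat indices, interval values, and the explicit mean-centring are assumptions for illustration); only numpy and scipy are needed.

import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
rr_ms = 800 + 50 * np.sin(np.linspace(0, 6 * np.pi, 60)) + rng.normal(0, 10, 60)  # RR intervals in ms
beat_idx = np.arange(1, 61, dtype=float)        # stand-in for peaklist[1:]
freqs = np.linspace(0.01, 0.5, 100)
pgram = signal.lombscargle(beat_idx, rr_ms - rr_ms.mean(), freqs, normalize=True)
print(freqs[np.argmax(pgram)])                  # close to the 0.05 cycles/beat (~0.31 rad/beat) modulation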
Example 3
    def izvrsimerenje(self):
        global perioda
        global values
        global pozicijamaksimuma
        global brojmax

        if (len(values) > 20):
            pozicijamaksimuma = indexes(np.array(values), thres=7.0 / max(values), min_dist=2)
            brojmax = len(pozicijamaksimuma)

            if (brojmax > 2):
                for x in range(1, brojmax):
                    if (pozicijamaksimuma[x - 1] != 0):
                        perioda += pozicijamaksimuma[x] - pozicijamaksimuma[x - 1]
                    else:
                        QtWidgets.QMessageBox.information(self, "Error!",
                                                          """Congratulations! You've caused a rare error! \n Please re-run the program and start over.""")

                perioda = perioda / (brojmax)
                perioda = perioda * 0.5
                udisajpm = 60 / perioda
                udisajpmint = int(udisajpm)

                QtWidgets.QMessageBox.information(self, "Success!",
                                                      """The average breathing period is: {:.2f} seconds\n Average breaths per minute: {} breaths per minute""".format(perioda, udisajpmint))

        else:
            QtWidgets.QMessageBox.information(self, "Error!",
                                              """Not enough data to perform the measurement!""")
Example 4
    def time_rr_intervals(self, t, y):
        """Returns heartbeat RR / NN intervals on a filtered input signal
        Note - it may contain noise values.

        :param t: (array) input time stamps of phase values
        :param y: (array) input phase values
        :return: (array) filtered heartbeat NN intervals
        """
        # HRV time
        indexes_peaks = indexes(y, min_dist=self.samp_rate / 2)
        RR_times = t[indexes_peaks]
        RR_intervals = np.diff(RR_times)

        RR_new = []
        for i in range(1, len(RR_intervals), 1):
            orig = RR_intervals[i - 1]
            new = RR_intervals[i]
            inc_dec = abs(((new - orig) / orig) * 100)
            if inc_dec > 25:
                continue
            else:
                RR_new.append(new)

        if len(RR_new) == 0:  # to deal with noisy data
            RR_new = [0]

        return np.array(RR_new)
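
A tiny worked example of the 25% jump filter used above, on hypothetical RR intervals in seconds.

rr = [0.80, 0.82, 1.40, 0.81, 0.79]   # hypothetical intervals; 1.40 is an artefact
kept = [rr[i] for i in range(1, len(rr))
        if abs((rr[i] - rr[i - 1]) / rr[i - 1]) * 100 <= 25]
print(kept)   # [0.82, 0.79] -- both the artefact and the jump back from it are rejected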
Example 5
def PPG_Peaks(data, freq, plot=False, remove_extreme=False):
	'''
	Performs the peak detection in steps: filtering (lowpass and cubic), peak detection (adaptive threshold
	and minimum distance), and finally finding the amplitude of each peak from the baseline-removed signal.
	'''

	# filters
	_data = data
	_data = lowpass_butter_filter(_data)
	_data = extreme_removal(_data) if remove_extreme else _data
	_data = cubing_filter(_data)

	# peak detection provided by the peakutils package; it uses an adaptive threshold and minimum distance
	slice = 1/3
	_peaks = indexes(_data, min_dist=freq*slice)
	peaks = [softTemplate(data, i, freq) for i in _peaks]

	# peak amps from filtered data
	amps = [_data[i] for i in peaks]

	if plot:
		b_data = data-baseline(data, 2)
		plot_data([data+10, b_data], labels=['PPG', 'PPG Baselined'], normalization=True, indice=(0,len(data)))
		#plot_data([None, b_data], peaksIndexs=[None,peaks], labels=[None,'PPG Baselined'], normalization=False, indice = (0,len(data)))
		#plot_data([None, None, _data], peaksIndexs=[None, None, _peaks], labels=[None,'PPG Baselined', 'PPG Filtered'], normalization=False, indice = (0,len(data))) 
		#plot_data([data, None, _data], peaksIndexs=[peaks, None, _peaks], labels=['PPG', 'PPG Baselined','PPG Filtered'], normalization=False, indice = (0,len(data))) 

	return peaks, amps
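
The helpers above (lowpass_butter_filter, cubing_filter, softTemplate, baseline) are not shown, so the following is only a guessed stand-in for the filtering chain: a Butterworth low-pass followed by cubing, with peakutils doing the minimum-distance peak picking. The cut-off, filter order, and synthetic pulse are assumptions, not project values.

import numpy as np
from scipy.signal import butter, filtfilt
from peakutils.peak import indexes

def lowpass(x, fs, cutoff=5.0, order=4):
    # zero-phase Butterworth low-pass (assumed parameters)
    b, a = butter(order, cutoff / (fs / 2.0), btype='low')
    return filtfilt(b, a, x)

fs = 128.0
t = np.arange(0, 10, 1 / fs)
ppg = np.sin(2 * np.pi * 1.2 * t) + 0.2 * np.random.randn(t.size)   # ~72 bpm toy pulse
filtered = lowpass(ppg, fs) ** 3                   # low-pass, then cube to sharpen the peaks
peaks = indexes(filtered, min_dist=int(fs / 3))    # at most one peak per third of a second
print(peaks[:5])                                   # indices of the first detected pulse peaks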
Example 6
def get_peaks(image, distance):
    seq = np.sum(image, axis=0, dtype=int)
    maxx = np.max(seq)
    thresh = maxx * 0.25
    #peaks,_ = find_peaks(seq, height=thresh, width=10)
    peaks = indexes(seq, min_dist=distance)
    #print("peaks:", len(peaks), " ", peaks)
    return peaks
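
Minimal usage sketch with a synthetic binary image, assuming get_peaks and its module-level imports (np, indexes) are available: two bright columns give two peaks in the column-wise projection.

import numpy as np

image = np.zeros((50, 100), dtype=np.uint8)   # synthetic image with two bright columns
image[:, 20] = 255
image[:, 70] = 255
print(get_peaks(image, distance=10))          # -> [20 70]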
Example 7
    def detect_memory_xform(self, start, end, incr, scale_x=1, scale_y=1):
        """
        Memory Read-Out function set for reading along a given axis.
        
        Tests the number of tagged particles over a range of area fractions, and 
        returns a list of area fractions where memories are detected. 
        
        Args:
            start (float): The first area fraction in the detection
            end (float): The last area fraction in the detection
            incr (float): The increment between test swells. Determines accuracy of the memory detection. 
            scale_x (float): scaling in x direction (function keeps particle area the same, no need for double inputting to account for particle area)
            scale_y (float): scaling in y direction (function keeps particle area the same, no need for double inputting to account for particle area)
        Returns:
            (np.array): list of swells where a memory is located
        """
        for i in self.centers:  #Transform centers along readout axis
            i[0] = i[0] * (scale_x / scale_y)
            i[1] = i[1] * (scale_y / scale_x)
        area_frac = np.arange(start, end, incr)
        curve = self.tag_curve_xform(area_frac, scale_x, scale_y)
        zeros = np.zeros(curve.shape)
        pos = np.choose(curve < 0, [curve, zeros])
        neg = np.choose(curve > 0, [curve, zeros])
        indices = peak.indexes(pos, 0.5, incr)
        nindices = peak.indexes(-neg, 0.5, incr)
        matches = []
        for i in indices:
            for j in nindices:
                desc = True
                if (i < j):
                    for k in range(i, j):
                        if (curve[k] < curve[k + 1]):
                            desc = False
                    if (desc):
                        matches.append(i)
        for i in self.centers:  #Transform centers back
            i[0] = i[0] * (scale_y / scale_x)
            i[1] = i[1] * (scale_x / scale_y)
        return area_frac[matches]

Example 8
def spike_detection(trace, times, threshold=-25.):
    from peakutils.peak import indexes

    idxs = indexes(np.asarray(trace), thres=0.5, min_dist=2)
    valid_idxs = []
    for idx in idxs:
        if trace[idx] >= threshold:
            valid_idxs.append(idx)
    idxs = valid_idxs
    spike_times = times[idxs]
    return np.asarray(spike_times)
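
Hypothetical usage on a synthetic membrane-potential trace in mV; the default -25 mV threshold keeps only the inserted spikes.

import numpy as np

times = np.linspace(0.0, 1.0, 1000)
trace = -65.0 + 5.0 * np.random.randn(1000)   # resting potential plus noise
trace[[200, 500, 800]] = 20.0                 # three artificial spikes
print(spike_detection(trace, times))          # -> spike times near 0.2, 0.5 and 0.8 s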
Example 9
    def sample_explain_depict(self,
                              decision,
                              n_exp,
                              num_samples=None,
                              measure_min=0.05,
                              measure_max=2,
                              number_eval=500,
                              dest=None):
        """ This method provides a robust explanation by finding accuracy (or metric) peaks across the range of measures.
        :param decision: Decision of interest.

        :param n_exp: Max number of explanations.
        :param num_samples: Number of samples to be sampled by the sampler.
        :param measure_min: Min measure to be used in the model.
        :param measure_max: Max measure to be used in the model.
        :param number_eval: Number of evaluations between measure_min and measure_max.
        :param dest: Destination to save the image.
        """

        linspace = np.linspace(measure_min, measure_max, number_eval)
        metric, weights = [], []

        for i in linspace:
            tmp = self.explanation.sample_explain_depict(
                decision, num_samples=num_samples, measure=i, depict=False)
            metric.append(tmp['metric'])

        metric = np.array(metric)

        min_len = len(metric) / 10
        peaks = indexes(metric, 0.6, min_len)
        print(peaks)
        n_exp = min(n_exp, len(peaks))

        fig, axs = plt.subplots(1, 1 + n_exp, figsize=(3 + 3 * n_exp, 2.5))

        for i, j in zip(peaks, np.arange(1, n_exp + 1)):
            axs[0].plot(linspace[i], metric[i], "b*")
            self.explanation.sample_explain_depict(decision,
                                                   num_samples=5000,
                                                   measure=linspace[i],
                                                   depict=True,
                                                   axis=axs[j])
        axs[0].plot(linspace, metric)
        axs[0].set_xlabel("$\ell$")
        axs[0].set_title("Weighted Accuracy")

        if dest is None:
            plt.show()
        else:
            plt.savefig(dest, bbox_inches="tight")
Example 10
def maximo_peak(vector):
    try:
        import numpy as np
        from peakutils.peak import indexes
        import peakutils
        idxs = indexes(np.array(vector), thres=1.0 / max(vector), min_dist=10)
        kk = list(idxs)
        for j in kk:
            if vector[j]> (sum(vector) / len(vector)):
                tiempos = j
                intensidades = vector[j]
        return tiempos,intensidades
    except:
        return 'nan','nan'
Example 11
def peaks(vector):

    from peakutils.peak import indexes
    vector=cleansignal(vector)
    thres=1/max(abs(vector))
    indexes2 = indexes(vector, thres, min_dist=len(vector)/2)
    if len(indexes2)==2:

        s=vector[indexes2[0]:indexes2[1]]
        #print(s)
        zero_crossings = np.where(np.diff(np.sign(s)))[0]
        if len(zero_crossings)==2:
            indexes2[1]=zero_crossings[1]+indexes2[0]
    return indexes2
Example 12
    def find_peaks_pts(data, width, initial_threshold, steps_size):
        """
        Find the maximum points in data.
        Args:
            data: processed data that has one or two Gaussian-like features
            width: expected width of the features (used as the minimum peak distance)
            initial_threshold: starting relative threshold for the peak detection
            steps_size: amount by which the threshold is adjusted on each iteration
        Returns:
            idx, data[idx]
            index and value of peaks
        """

        threshold = initial_threshold  # initial threshold
        continue_search = True
        # number of peaks of previous attempt
        number_peaks_previous = -1


        while continue_search:
            idx = indexes(np.array(data), thres=threshold, min_dist=width)
            #             print(idx)
            if len(idx) > 2:
                threshold += steps_size
            elif len(idx) == 2:
                # double peak detected, maybe need to check if reasonable here
                continue_search = False
            elif len(idx) == 1:
                # single peak detected
                continue_search = False
            elif len(idx) == 0:
                # no peak detected in this iteration, but detection succeeded before;
                # this means we should go back and raise the threshold more slowly
                if number_peaks_previous > 0:
                    threshold -= steps_size  # go back to the previous threshold that was successful
                    steps_size /= 2.  # reduce step size by a factor of 2
                else:
                    # search failed
                    continue_search = False

            number_peaks_previous = len(idx)

        return idx, data[idx]
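
Hypothetical call of the adaptive-threshold search on a double-Gaussian test trace, assuming the function is reachable at module level (in the original it sits inside a class).

import numpy as np

x = np.linspace(-1, 1, 400)
data = np.exp(-(x + 0.4) ** 2 / 0.005) + 0.8 * np.exp(-(x - 0.4) ** 2 / 0.005)
idx, vals = find_peaks_pts(data, width=40, initial_threshold=0.05, steps_size=0.05)
print(idx, vals)   # two indices, one near each Gaussian centre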
Example 13
def slice_offset(origin_image, image_to_verify):
    filter_func = lambda x: 0 if x < grey_threshold else 255
    origin_image_grey = origin_image.filter(ImageFilter.FIND_EDGES).convert('L').point(filter_func)
    image_to_verify_grey = image_to_verify.filter(ImageFilter.FIND_EDGES).convert('L').point(filter_func)
    if platform.system() == 'Darwin':
        image_to_verify_grey = image_to_verify_grey.resize((origin_image.width, origin_image.height))
    origin_image_grey.show()
    image_to_verify_grey.show()

    x_diff = [0] * origin_image.width
    for i in range(origin_image.width - 1, -1, -1):
        diff_count = 0
        for j in range(origin_image.height - 1, -1, -1):
            if origin_image_grey.getpixel((i, j)) != image_to_verify_grey.getpixel((i, j)):
                diff_count += 1
        x_diff[i] = diff_count
    waves = indexes(np.array(x_diff), thres=7.0/max(x_diff), min_dist=20)
    print("waves:", ' '.join((str(x) for x in waves)))
    offset = waves[2] - waves[0]
    print('offset:', offset)
    return offset
Example 14
    def breathing_intervals(self, t, y):
        """Returns the intervals between breaths (sometimes called respiratory rate (RR) intervals).
        Note - it may contain noise.

        :param t: (array) input time stamps of phase values
        :param y: (array) input phase values
        :return: array of RR intervals
        """
        y = pd.DataFrame(y)
        # t=pd.DataFrame(t)
        y_ma = np.array(y.rolling(int(self.samp_rate * 1), center=True).mean())
        y_not_nans = np.logical_not(np.isnan(y_ma))
        y_not_nans = y_not_nans[:, 0]
        y_ma = y_ma[y_not_nans, 0]
        t_ma = t[y_not_nans]
        indexes_peaks = indexes(y_ma, min_dist=self.samp_rate * 1.4)

        BB_times = t_ma[indexes_peaks]
        BB_intervals_diffs = np.diff(BB_times)

        return BB_intervals_diffs
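
A standalone sketch of the smoothing-then-peak-picking step on a made-up breathing signal (samp_rate and the 0.25 Hz sine are assumptions, not project data).

import numpy as np
import pandas as pd
from peakutils.peak import indexes

samp_rate = 20                                  # Hz, hypothetical
t = np.arange(0, 60, 1 / samp_rate)
y = np.sin(2 * np.pi * 0.25 * t)                # ~15 breaths per minute
y_ma = pd.Series(y).rolling(samp_rate, center=True).mean().to_numpy()
valid = ~np.isnan(y_ma)
peaks = indexes(y_ma[valid], min_dist=samp_rate * 1.4)
print(np.diff(t[valid][peaks]))                 # roughly 4 s between breaths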
Example 15
def plot_time_and_frequency(win_t,
                            win_y,
                            fltrd_hp,
                            samp_rate,
                            fltrd,
                            xlabel='frequency (breaths per minute)'):
    """Signal plotting"""
    plt.figure(1)
    plt.clf()
    plt.subplot(121)
    plt.xlabel('time (s)', fontsize=12)
    plt.ylabel('distance', fontsize=12)
    plt.plot(win_t, win_y, label='Signal')
    plt.plot(win_t, fltrd_hp, 'orange', label='Filtered signal')
    indexes_peaks = indexes(fltrd_hp, min_dist=samp_rate / 2)
    plt.plot(win_t[indexes_peaks], fltrd_hp[indexes_peaks], 'ro')
    plt.legend()
    plt.subplot(122)
    plt.xlabel(xlabel, fontsize=12)
    plt.ylabel('amplitude', fontsize=12)
    plt.plot(fltrd[0], fltrd[1], 'orange')
Example 16
def PPG_Peaks(data, freq, plot=False, remove_extreme=True, freq_up=256):
	'''
	Performs the peak detection in steps: filtering (lowpass and cubic), peak detection (adaptive threshold
	and minimum distance), and finally finding the amplitude of each peak from the baseline-removed signal.
	'''
	
	timecol = np.linspace(0,len(data),len(data))

	# filters
	_data, _timecol = data, timecol
	#_data, _timecol = upsample(data, freq, freq_up)
	_data = lowpass_butter_filter(_data)
	_data = extreme_removal(_data) if remove_extreme else _data
	_data = cubing_filter(_data)

	# peak detection provided by the peakutils package; it uses an adaptive threshold and minimum distance
	peaks = indexes(_data, min_dist=(1/2.5)*freq_up)
	peaks_sec = [_timecol[i]/freq for i in peaks]

	# peak amps from filtered data
	amps = np.array([abs(_data[i])**(1/3) for i in peaks])

	if plot:
		import matplotlib.pyplot as plt
		plt.plot(timecol/freq, data, 'blue', label='raw')
		plt.plot(_timecol/freq, _data, 'green', label='filt')

		b = [_data[int(j*freq)] for j in peaks_sec]
		b = np.array(b)
		plt.plot(peaks_sec, b, 'rx', label='filt peaks')

		plt.legend()
		plt.show()
		#b_data = data-baseline(data, 2)
		#plot_data([data+10, b_data], labels=['PPG', 'PPG Baselined'], normalization=True, indice=(0,len(data)))
		#plot_data([None, b_data], peaksIndexs=[None,peaks], labels=[None,'PPG Baselined'], normalization=False, indice = (0,len(data)))
		#plot_data([None, None, _data], peaksIndexs=[None, None, _peaks], labels=[None,'PPG Baselined', 'PPG Filtered'], normalization=False, indice = (0,len(data))) 
		#plot_data([data, None, _data], peaksIndexs=[(peaks_sec*freq_up).astype(int), None, peaks], labels=['PPG', 'PPG Baselined','PPG Filtered'], normalization=True, indice = (0,15000)) 

	return peaks_sec, amps
Example 17
def HPS(fft, fft_size):
    "Returns the fundamental frequency given the FFT of the chunk"

    # Create a list of spectra, each being the FFT decimated by an integer factor
    downsamples = [fft] + [fft[0:-1:fac] for fac in range(2, NUM_HARMONICS + 2)]
    # Group amplitudes of same frequencies (index) and multiply them together
    product = [reduce(lambda x, y: x * y, freq) for freq in zip(*downsamples)]
    # Get the max indexes among the products
    indexes = peak.indexes(np.array(product), THRES_AMPL, MIN_DIST)

    # Discard chunks with noise or multiple frequency components
    if len(indexes) != 1:
        return None
    # If the global maximum is not at the detected peak, the chunk is not valid
    elif product.index(max(product)) != indexes[0]:
        return None

    # Multiply index by the fft precision (1/fft_size)...
    # ... and by the frequency used (fs / dec_factor)
    # The result is the equivalent frequency in Hz and must be returned
    return indexes[0] * RATE / float(DEC_FACTOR * fft_size)
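
The module-level constants the function relies on (NUM_HARMONICS, THRES_AMPL, MIN_DIST, RATE, DEC_FACTOR) are not shown above, so the values below are placeholders; the quick test feeds a synthetic tone whose harmonics fall exactly on FFT bins.

from functools import reduce
import numpy as np
from peakutils import peak

NUM_HARMONICS = 3     # placeholder settings -- the project's real values are not shown
THRES_AMPL = 0.3
MIN_DIST = 5
RATE = 8192           # Hz
DEC_FACTOR = 1

fft_size = 1024
t = np.arange(fft_size) / RATE
chunk = sum(np.sin(2 * np.pi * 400.0 * k * t) / k for k in range(1, 5))  # 400 Hz plus three harmonics
fft = list(np.abs(np.fft.rfft(chunk))[:fft_size // 2])
print(HPS(fft, fft_size))   # -> 400.0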
Example 18
def main(args):
    if "-p" in args:
        drawChart = True
    else:
        drawChart = False
    filename = args[0]
    data = read_file(filename, rmnl=True)
    try:
        float(data[0])
        data = [float(item) for item in data]
    except ValueError:
        # fall back to parsing the value after "=" on each line
        data = [
            round(float(item[item.find("=") + 2:]), 4) for item in data[13:-1]
        ]  # consider cut value here or round(xx, 4)
    peaks = list(indexes(np.array(data), thres=3.0 / max(data), min_dist=300))
    print("First peak position: %s, force: %s[N]" %
          (peaks[0], data[int(peaks[0])]))  #returns first value

    if drawChart:
        data1 = [key for key, val in enumerate(data)]
        draw_chart(data1, data, [], peaks)
Example 19
    def _computeSignal(self, signal):
        obj = {}

        # Best min_dist & thres for sphygmogram signal
        peaks = peak.indexes(signal, min_dist=56, thres=0.16)

        # Ignore abnormal signals (with no peaks)
        if (len(peaks) == 0): return obj

        nn = tools.nn_intervals(peaks)

        # Ignore abnormal signals (with no NN intervals)
        if (len(nn) == 0): return

        welch = {'welch': self._welch_psd(nn, peaks)}
        lomb = {'lomb': self._lomb_psd(nn, peaks)}
        ar = {'ar': self._ar_psd(nn, peaks)}

        obj['welch'] = self._walk_over(welch, 'welch')
        obj['lomb'] = self._walk_over(lomb, 'lomb')
        obj['ar'] = self._walk_over(ar, 'ar')

        return obj
Example 20
    def _computeSignal(self, signal):
        obj = {}

        # Best min_dist & thres for sphygmogram signal
        peaks = peak.indexes(signal, min_dist=56, thres=0.16)

        # Ignore abnormal signals (with no peaks)
        if (len(peaks) == 0): return obj

        nn = tools.nn_intervals(peaks)

        # Ignore abnormal signals (with no NN intervals)
        if (len(nn) == 0): return

        # Poincare method
        poincare = {'poincare': self._poincare(nn, peaks)}
        obj['poincare'] = self._walk_over(poincare, 'poincare')

        # ACF
        acf = {'ACF': self._ACF(signal, int(len(signal) / 2))}
        obj['ACF'] = self._walk_over(acf, 'ACF')

        return obj
Example 21
def FFTPeaks(cur_data,
             in_time,
             cut_off_sig_len,
             num_peaks,
             min_dist=5,
             thres=0.1):
    sample_freq = np.mean(1 / (np.diff(in_time)))
    sample_period = 1 / sample_freq
    sig_len = len(cur_data)
    fft_result = np.fft.fft(cur_data)
    sig_fft = np.abs(fft_result / sig_len)
    # single-sided amplitude spectrum
    hf_len = int(sig_len / 2)
    fft_y = sig_fft[1:hf_len + 1]
    fft_y[1:-1] = 2 * fft_y[1:-1]
    freq_x = sample_freq * np.arange(hf_len) / sig_len
    # find peaks and generate features with interpolative hist count
    all_maxs_idxs = pk.indexes(fft_y[:cut_off_sig_len], thres, min_dist)
    maxs_idxs = [
        each[1]
        for each in sorted([(fft_y[idx], idx) for idx in all_maxs_idxs],
                           reverse=True)[:min(num_peaks, len(all_maxs_idxs))]
    ]
    return freq_x, fft_y, maxs_idxs
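
Hypothetical call on a two-tone test signal, assuming the module imports numpy as np and peakutils.peak as pk, which the function body requires.

import numpy as np

fs = 100.0
t = np.arange(0, 10, 1 / fs)
sig = np.sin(2 * np.pi * 3 * t) + 0.5 * np.sin(2 * np.pi * 12 * t)
freq_x, fft_y, idxs = FFTPeaks(sig, t, cut_off_sig_len=200, num_peaks=2)
print(freq_x[idxs])   # two peaks near 3 Hz and 12 Hz, strongest first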
Example 22
def number_peaks(a):
	#number_of_peaks = 0
	index = indexes(np.array(a))
	return len(index)
Example 23
def guessKey(ciphertext=None,verbose=False):
    a = ciphertext
    b = ciphertext
    freq = []
    for i in range(0, len(a) - 1):
        b = " " + b[:-1]
        c = 0
        for j in range(i, len(a)):
            if b[j] == a[j]:
                c += 1
        freq.append(c)
    peaks = indexes(freq, thres=0.678)
    possible_keys = []
    possible_keys.append(peaks[1] - peaks[0])
    key = possible_keys[0]

    # cipher
    j = 0
    ciphers = []
    p = ""
    while (j < key):
        i = j
        p = ""
        while (i < len(a)):
            p = p + a[i]
            i = i + key
        j += 1
        ciphers.append(p)

    # frequency
    ciphers_freq = []
    for i in range(0, len(ciphers)):
        arr = [0] * 26
        p = ciphers[i]
        j = 0
        while (j < len(p)):
            pos = (ord(p[j]) - 13) % 26
            arr[pos] += 1
            j += 1
        ciphers_freq.append(arr)

    # map of frequency
    ind = []
    for i in range(0, 26):
        ind.append(i)

    def shift_left(f, t1):
        i = 0
        while i < f:
            t2 = t1
            t1 = t2[1:len(t2)] + [t2[0]]
            i += 1
        return t1

    j = 0
    char_freq = [0.08, 0.02, 0.03, 0.04, 0.13, 0.02, 0.02, 0.06, 0.07, 0.0, 0.01, 0.04, 0.02, 0.07, 0.08, 0.02, 0.0,
                 0.06, 0.06, 0.09, 0.03, 0.01, 0.02, 0.0, 0.02, 0.0]

    freq_sums = []

    def get_key(ciphers_freq, key_len):
        guessed_key = ""
        for l in range(0, key_len):
            freq_sums = []
            for i in range(0, 26):
                shift = shift_left(i, ciphers_freq[l])
                sum = 0
                for k in range(0, 26):
                    sum += shift[k] * char_freq[k]
                freq_sums.append(sum)
            freq_max = zip(freq_sums, ind)
            freq_max = list(freq_max)
            freq_max = sorted(freq_max, reverse=True)
            guessed_key += chr(65 + freq_max[0][1])
        return guessed_key

    print("[+] Guessed Key: " + get_key(ciphers_freq, key))
    return get_key(ciphers_freq, key)
Example 24
    def _computeSignal(self, signal):
        obj = {}

        # Best min_dist & thres for sphygmogram signal
        peaks = peak.indexes(signal, min_dist=56, thres=0.16)

        # Ignore abnormal signals (with no peaks)
        if (len(peaks) == 0): return obj

        nn = tools.nn_intervals(peaks)

        # Ignore abnormal signals (with no NN intervals)
        if (len(nn) == 0): return

        # Standard
        obj = dict(td.nni_parameters(nn, peaks), **obj)
        obj = dict(td.nni_differences_parameters(nn, peaks), **obj)
        obj = dict(td.sdnn(nn, peaks), **obj)
        obj = dict(td.sdnn_index(nn, peaks), **obj)
        obj = dict(td.sdann(nn, peaks), **obj)
        obj = dict(td.rmssd(nn, peaks), **obj)
        obj = dict(td.sdsd(nn, peaks), **obj)
        obj = dict(td.nn50(nn, peaks), **obj)
        obj = dict(td.nn20(nn, peaks), **obj)
        obj = dict(td.geometrical_parameters(nn, peaks, plot=False), **obj)
        del obj['nni_histogram']

        # Additional
        obj = dict({'cv': self._cv(obj['sdnn'], obj['nni_mean'])}, **obj)

        peaks_diff = tools.nni_diff(peaks)
        obj = dict({'MxDMn': max(peaks_diff) - min(peaks_diff)}, **obj)
        obj = dict({'MxRMn': max(peaks_diff) / min(peaks_diff)}, **obj)
        obj = dict({'Mo': stats.mode(peaks_diff)[0][0]}, **obj)

        counter = Counter(peaks_diff)
        idx = list(counter.keys()).index(obj["Mo"])
        obj = dict({'AMo': list(counter.values())[idx]}, **obj)
        obj = dict({'SI': obj['AMo'] / (2 * obj['Mo'] * obj['MxDMn'])}, **obj)

        # Autocorrelation function

        # Frequency stats
        welch = frequency_domain(signal).stats['welch']['params']
        bands = list(welch['fft_bands'].keys())

        obj = dict({'TP': welch['fft_total']}, **obj)

        obj = dict({'HF': welch['fft_rel'][bands.index('hf')]}, **obj)
        obj = dict({'LF': welch['fft_rel'][bands.index('lf')]}, **obj)
        obj = dict({'VLF': welch['fft_rel'][bands.index('vlf')]}, **obj)
        obj = dict({'ULF': welch['fft_rel'][bands.index('ulf')]}, **obj)

        obj = dict({'HFav': welch['fft_abs'][bands.index('hf')]}, **obj)
        obj = dict({'LFav': welch['fft_abs'][bands.index('lf')]}, **obj)
        obj = dict({'VLFav': welch['fft_abs'][bands.index('vlf')]}, **obj)
        obj = dict({'ULFav': welch['fft_abs'][bands.index('ulf')]}, **obj)

        obj = dict({'(LF/HF)av': obj['LFav'] / obj['HFav']}, **obj)
        obj = dict({'IC': obj['LF'] / obj['VLF']}, **obj)

        for k in obj:
            if (math.isnan(obj[k])):
                obj[k] = 0

        return obj
Example 25
def guessKey(ciphertext=None, verbose=False):
    a = ciphertext
    b = ciphertext
    freq = []
    for i in range(0, len(a) - 1):
        b = " " + b[:-1]
        c = 0
        for j in range(i, len(a)):
            if b[j] == a[j]:
                c += 1
        freq.append(c)
        # print(c)
    # peaks = find_peaks(freq)
    peaks = indexes(freq, thres=0.678)
    # print(freq)
    # print(peaks)
    possible_keys = []
    possible_keys.append(peaks[1] - peaks[0])
    logging.info("Possible Key Length: {} ".format(possible_keys[0]))
    # print(possible_keys[0])
    # print(type(possible_keys[0]))
    # key = input("Select a Key: ")
    # print(peaks)
    # print(freq)
    #logging.debug("Possible Key Lengths : {} ".format(possible_keys))
    logging.debug("Peaks : {}".format(peaks))
    logging.debug("Freq  : {}".format(freq))
    key = possible_keys[0]

    # ciphers
    j = 0
    ciphers = []
    p = ""
    while (j < key):
        i = j
        p = ""
        while (i < len(a)):
            # print(a[i],end=" ")
            p = p + a[i]
            i = i + key
        j += 1
        ciphers.append(p)
        # print(p)

    # frequency
    ciphers_freq = []
    for i in range(0, len(ciphers)):
        arr = [0] * 26
        p = ciphers[i]
        # i = 0
        # arr = [0] * 26
        j = 0
        while (j < len(p)):
            pos = (ord(p[j]) - 13) % 26
            arr[pos] += 1
            j += 1
        ciphers_freq.append(arr)
    # print("Ciphers: ")
    # print(ciphers_freq)
    logging.debug("Ciphers : {}".format(ciphers_freq))
    # map of frequency
    ind = []
    for i in range(0, 26):
        ind.append(i)

    def shift_left(f, t1):
        i = 0
        while i < f:
            t2 = t1
            t1 = t2[1:len(t2)] + [t2[0]]
            i += 1
        return t1

    j = 0
    char_freq = [
        0.08, 0.02, 0.03, 0.04, 0.13, 0.02, 0.02, 0.06, 0.07, 0.0, 0.01, 0.04,
        0.02, 0.07, 0.08, 0.02, 0.0, 0.06, 0.06, 0.09, 0.03, 0.01, 0.02, 0.0,
        0.02, 0.0
    ]

    freq_sums = []

    def get_key(ciphers_freq, key_len):
        guessed_key = ""
        for l in range(0, key_len):
            freq_sums = []
            for i in range(0, 26):
                shift = shift_left(i, ciphers_freq[l])
                sum = 0
                for k in range(0, 26):
                    sum += shift[k] * char_freq[k]
                freq_sums.append(sum)
            freq_max = zip(freq_sums, ind)
            freq_max = list(freq_max)
            freq_max = sorted(freq_max, reverse=True)
            # print(freq_max[0][1])
            guessed_key += chr(65 + freq_max[0][1])
        return guessed_key

    logging.info("Guessed Key: " + get_key(ciphers_freq, key))
    print("[+] Guessed Key: " + get_key(ciphers_freq, key))
    return get_key(ciphers_freq, key)
Example 26
def processVid(vid):

    y_img = []
    try:

        video_capture = cv2.VideoCapture(os.path.join(train_path + '/', str(vid)))
        name = vid
        #global countVid
        global counter 
        
        print(' BEGIN processing video: ' + str(vid) + ' with label = ' + str(json1_data[name]['label']))
        #countVid+=1

        # Initialize variables
        face_locations = []
        
        #os.mkdir(cropedTrainAllImgDir + '/' + name[:-4])
        greenChanel_sig = []
        while video_capture.isOpened(): 
            
            #print('back in for at iteration -- ' + str(i))
            #try :
                        
            counter += 1
            #print(' processing  frame no ' + str(counter)  + '... :)')
            ret, frame = video_capture.read()

            if counter >= 295 :
                break

            #if counter % 20 != 0:
            #s    continue

            print(' counter = ' + str(counter))
            # Grab a single frame of video
            #vidCaptureandFrameFlipT0 = time.time()

            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
            rgb_frame = frame[:, :, ::-1]

            #vidCaptureandFrameFlipT1 = time.time()

            #totalVidCapturenFrameFlip = vidCaptureandFrameFlipT1 - vidCaptureandFrameFlipT0
            #print('vidCaptureandFrameFlip of image took -- ' + str(totalVidCapturenFrameFlip) + '  milliseconds')

            #faceRecogT0 = time.time()

            # Find all the faces in the current frame of video
            face_locations = face_recognition.face_locations(rgb_frame)

            #faceRecogT1 = time.time()

            #faceRecogtime = faceRecogT1 - faceRecogT0
            #print('faceRecog time of image took -- ' + str(faceRecogtime) + '  milliseconds')


            if face_locations is not None:
            # Display the results
                for face_position in face_locations:
                    # Draw a box around the face

                    #croppingLoopT0 = time.time()

                    offset = round(margin * (face_position[2] - face_position[0]))
                    y0 = max(face_position[0] + offset, 0)
                    x1 = min(face_position[1] - offset, rgb_frame.shape[1])
                    y1 = min(face_position[2] - offset, rgb_frame.shape[0])
                    x0 = max(face_position[3] + offset, 0)
                    face = rgb_frame[y0:y1,x0:x1]

                    #print('dim of face = ' + str(face.shape ))

                    green_channelVals = np.mean(np.mean(face,axis=1),axis=0)[1]

                    #print('shape of green cqhnnel signal = ' + str(green_channelVals.shape))

                    #print('.. and some values from this green cqhnnel signal = ' + str(green_channelVals))

                    greenChanel_sig.append(green_channelVals)

                    #croppingLoopT1 = time.time(),

                    # cv2.imshow('full_frame',frame)

                    # if cv2.waitKey(1) & 0xFF == ord('q'):
                    #     continue

                    # cv2.imshow('blah',face)

                    # if cv2.waitKey(1) & 0xFF == ord('q'):
                    #     continue

                    #totalCrop = croppingLoopT1 - croppingLoopT0
                    #print('cropping of image took -- ' + str(totalCrop) + '  milliseconds')

                    #cv2.rectangle(face, (left, top), (right, bottom), (0, 0, 255), 2)

                    #imgResizernAppendT0 = time.time()

                    inp = cv2.resize(face,(size,size))
                    #IMAGES.append(np.expand_dims(inp,axis=0))   

                    # imgResizernAppendT1 = time.time()

                    # imgResizenAppend = imgResizernAppendT1 - imgResizernAppendT0
                    # print('resizing and appending of image took -- ' + str(imgResizenAppend) + '  milliseconds')

                    #print('shape of inp to go into IMAGES datastruct = ' + str(inp.shape))

                    #saveDir = cropedTrainAllImgDir 

                    # imageWriteT0 = time.time()
                    #imageio.imwrite(saveDir + '/' + name + '_' + str(counter) + '.jpg', face)
                    
                    # imageWriteT1 = time.time()

                    # total = imageWriteT1 - imageWriteT0
                    # print('saving of image took -- ' + str(total) + '  milliseconds')
            else:
                print('no face found in frame ' + str(counter))
                continue

        #print(' length of green signal  = ' + str(greenChanel_sig))

        greenChanel_sig_np = np.array(greenChanel_sig) #.flatten()

        print('video => ' + str(name) + ' green channel signal : ')
        print(greenChanel_sig_np)

        bestSnR_FfullSignal = greenChanel_sig_np

        print('It looks like ... ')

        F_rate = 27.27
        avgLength = 30

        heartRate_sig = butter_lowpass_filter(bestSnR_FfullSignal, 2.0, F_rate, order=4)
        print('got past  OUTER low_pass filter and doing convolution -- hr signal pre-smooth is ...')
            
        N=25
        smoothed_sig  = np.convolve(heartRate_sig, np.ones((N,))/N)[(N-1):]    

        print(' DID convolution & about to do indexes func. ... heartRate_sig = ' + str(heartRate_sig))

        print('got past  convolution -- -- hr signal post-smooth is ...')

        plt.plot(smoothed_sig)
        plt.show()
        
        indices = indexes(np.array(smoothed_sig), thres=0.0001, min_dist=0)

        print(' Found peaks with the indexes function & computing differences between peaks ... indices = ' + str(indices))
            
        peak_index_vect=indices[1:]
    
        hitv=np.zeros((len(peak_index_vect)-1))
        #hitv1=np.zeros((len(peak_index_vect)-1))

        for j in range(0,len(peak_index_vect)-1):
            hitv[j] = peak_index_vect[j+1]-peak_index_vect[j]       

        ## Mean of time dist between two peaks = HR:
        #avg_peakDist=np.mean(hitv)
        
        hitv=hitv[0:len(peak_index_vect)-2]     

        print(' computing the mean of the differences between peaks ... hitv = ' + str(hitv))
    
        ## Mean of time dist between two peaks = HR:
        avg_peakDist=np.mean(hitv) #_short)       

        print(' finally calculating the avg. HR from this ...')
        
        avg_HR=round((60.0*F_rate)/avg_peakDist)  
        #hr_list.append(avg_HR)
        #global_HR=hr_list[-1]
        
        #count=count+1
        #print('After')
        #print(count)
        #print('seconds .....')
        
        print('**************************************************')
        print('****************  avg. exact HR = ***************')
        print((60.0*F_rate)/avg_peakDist)
        print('**************************************************')
        
        print('**************************************************')
        print('**************** Rounded AVG. HR = ***************')
        print(avg_HR)
        print('**************************************************') 


        plt.plot(smoothed_sig)
        plt.show()


            # if json1_data[name]['label'] == 'REAL':
            #     y_img.append(0)
            # else:
            #     y_img.append(1)     
        
    except Exception as inst:

        print("Exception occurred while processing the video file ...")
        print(inst)

    return IMAGES,y_img
Example 27
def getPeaks(signal):
    return peak.indexes(signal, min_dist=56, thres=0.16)
Example 28
 def getCOSFIRETuples(self, bank, xc, yc):
     operator = []
     phiList = np.arange(360) * np.pi / 180.0  # Discretization of the circle
     for i in range(len(self.rhoList)):  # Iterate over the list of radii
         if self.rhoList[i] == 0:  # Case rho = 0
             if self.nameFilter == 'Gabor':
                 for k in range(self.filterParam[2].size):
                     ind = 0
                     val = -1
                     tupla = np.zeros(4)
                     for l in range(self.filterParam[3].size):
                         par = (self.filterParam[2][k],
                                self.filterParam[3][l])
                         if self.input[par][xc][yc] > self.maxi * self.t2:
                             ind = l
                             val = self.input[par][xc][yc]
                     if val > -1:
                         tupla[2] = self.filterParam[3][ind]
                         tupla[3] = self.filterParam[2][k]
                         operator.append(tupla)
             elif self.nameFilter == 'DoG':
                 for k in range(self.filterParam[0].size):
                     if self.input[self.filterParam[0]
                                   [k]][xc][yc] > self.maxi * self.t2:
                         tupla = np.zeros(3)
                         tupla[2] = self.filterParam[0][k]
                         operator.append(tupla)
         elif self.rhoList[i] > 0:  # Case rho > 0
             listMax = np.zeros(360)
             direcciones = []
             for k in range(phiList.size):
                 yi = int(yc +
                          np.floor(self.rhoList[i] * np.cos(phiList[k])))
                 xi = int(xc -
                          np.floor(self.rhoList[i] * np.sin(phiList[k])))
                 val = 0
                 nr = self.patternImage.shape[0]
                 nc = self.patternImage.shape[1]
                 if xi >= 0 and yi >= 0 and xi < nr and yi < nc:
                     for l in self.input:
                         if self.input[l][xi][yi] > val:
                             val = self.input[l][xi][yi]
                 listMax[k] = val
                 direcciones.append((xi, yi))
             ss = int(360 / 16)
             #nn=np.arange(360)
             #plt.plot(nn,listMax)
             if len(np.unique(listMax)) == 1:
                 continue
             listMax1 = np.zeros(listMax.size + 1)
             for p in range(listMax.size):
                 listMax1[p + 1] = listMax[p]
             index = indexes(listMax1, thres=0.2, min_dist=ss)
             index = list(index - 1)
             index = np.array(index)
             for k in range(index.size):
                 if self.nameFilter == 'Gabor':
                     for l in range(self.filterParam[2].size):
                         mx = -1
                         ind = 0
                         for m in range(self.filterParam[3].size):
                             par = (self.filterParam[2][l],
                                    self.filterParam[3][m])
                             var = self.input[par][direcciones[
                                 index[k]][0]][direcciones[index[k]][1]]
                             if var > self.t2 * self.maxi:
                                 if mx < var:
                                     mx = var
                                     ind = m
                         if mx != -1:
                             tupla = np.zeros(4)
                             tupla[0] = self.rhoList[i]
                             tupla[1] = index[k] * (np.pi / 180.0)
                             tupla[2] = self.filterParam[3][ind]
                             tupla[3] = self.filterParam[2][l]
                             operator.append(tupla)
                 elif self.nameFilter == 'DoG':
                     for l in self.input:
                         var = self.input[l][direcciones[index[k]][0]][
                             direcciones[index[k]][1]]
                         if var > self.t2 * self.maxi:
                             tupla = np.zeros(3)
                             tupla[0] = self.rhoList[i]
                             tupla[1] = index[k] * (np.pi / 180.0)
                             tupla[2] = l
                             operator.append(tupla)
     return operator
Example 29
def calculations(time, accel, numpy):
    # Sampling Frequency
    Fs = 1 / (time[1] - time[0])

    # Zero Padding
    N = len(time)

    # Apply hanning window over interval
    w = numpy.hanning(len(accel))
    coherentGain = sum(w) / len(accel)
    hanningApplied = numpy.zeros((len(accel)))
    hanningApplied = accel[:] * w[:] / coherentGain

    # Extract Windowed Double Sided FFT for phase and frequency analysis
    fftPolarDouble = numpy.fft.fft(hanningApplied, N) / N
    fftPolarSingle = fftPolarDouble[1:int(N / 2) + 1]

    fftSmooth = abs(fftPolarSingle)
    end = len(fftPolarSingle)

    fftPolarSingle[2:end - 1] = 2 * fftPolarSingle[2:end - 1]
    fftSmooth[2:end - 1] = 2 * fftSmooth[2:end - 1]

    # Scale Frequency Bins
    freqBin = Fs * numpy.arange(int(N / 2)) / N
    ampl = []

    # Find first 3 largest peaks
    indexes = peak.indexes(fftSmooth, min_dist=2)
    for i in range(len(indexes)):
        ampl.append(fftSmooth[indexes[i]])
        Fs = indexes

    # Number of harmonics to extract from fft
    harmonics = len(ampl)
    if harmonics > 3:
        harmonics = 3

    # Calculating phase shift of acceleration and using fundamental frequency
    if len(indexes) > 1:
        z = fftPolarSingle[Fs[0:harmonics]]
        fcc = freqBin[Fs[0]]
        theta = numpy.arctan2(z[:].imag, z[:].real)
    elif len(indexes) == 1:
        z = fftPolarSingle[numpy.int_(Fs)]
        fcc = freqBin[numpy.int_(Fs)]
        theta = numpy.arctan2(z.imag, z.real)
    else:
        return 0, 0

    # Calculating S_k (cm) given A_k(m/s/s) and fcc, finding phase shift
    A_k = ampl[0:harmonics]
    tmp = numpy.arange(1, harmonics + 1)
    tmp1 = (tmp * tmp) * (2 * pi * fcc)**2
    S_k = 100 * (A_k / tmp1)
    phi = theta + pi

    # Calculating Displacement Series
    sofT = numpy.zeros(len(time))
    if harmonics != 1:
        for i in range(0, harmonics):
            sofT += S_k[i] * numpy.cos(2 * pi * (i + 1) * fcc * time + phi[i])

    else:
        sofT = S_k * numpy.cos(2 * pi * fcc * time + phi)

    depth = max(sofT) - min(sofT)
    try:
        rate = fcc[0] * 60
    except:
        rate = fcc * 60

    #Writes to txt (debugging only)

    # Plots graph (development only)
    #graph.plot(freqBin, fftSmooth, "fbin (s)", "Amplitude", "Distance vs Time", 311, 1, plt)
    #graph.plot(time, hanningApplied, "Time (s)", "Accel", "Hanning vs Time", 312, 0, plt)
    #graph.plot(time, sofT, "Time (s)", "Displacement", "Distance vs Time", 313, 0, plt)
    #plt.show(block=False)

    return sofT, rate
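
Hypothetical call with a single-frequency acceleration trace; the function additionally expects pi (from math) and peakutils.peak imported as peak in its own module, and the 2 Hz, 5 cm amplitude input here is made up.

import numpy as np

t = np.linspace(0, 30, 3000)
amp_m = 0.05                                     # 5 cm displacement amplitude
accel = -amp_m * (2 * np.pi * 2.0) ** 2 * np.cos(2 * np.pi * 2.0 * t)   # a(t) = s''(t) for 2 Hz motion
sofT, rate = calculations(t, accel, np)
print(round(float(rate)), round(max(sofT) - min(sofT), 1))   # ~118-120 per minute (one-bin quantization), ~10 cm peak-to-peak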
Example 30
# Plot Values
axs[2].set_title("(c) Values (\%)")
axs[2].plot(linspace, weight[0], label="F1")
axs[2].plot(linspace, weight[1], label="F2")
axs[2].set_xlabel("$\ell$")
axs[2].legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.suptitle("Multiples Explanations for Values of $\ell$")
plt.subplots_adjust(wspace=0.25, top=0.7)
plt.savefig("./imgs/test_logistic_regression_robust_explanation_1.pdf",
            bbox_inches="tight")

fig, axs = plt.subplots(1, 3, figsize=(9, 2.5))

min_len = len(df2.interp_metric) / 10

peaks = indexes(df2.metric, 0.6, min_len)

for i, j in zip(peaks, [1, 2]):
    axs[0].plot(df2.measure[i], df2.metric[i], "b*")
    exp.sample_explain_depict(decision,
                              num_samples=5000,
                              measure=df2.measure[i],
                              depict=True,
                              axis=axs[j])
axs[0].set_xlabel("$\ell$")
axs[0].set_title("Weighted Accuracy")

plt.savefig("./imgs/test_logistic_regression_robust_explanation_2.pdf",
            bbox_inches="tight")