Example #1
def calc_rr():
    """
    Uses most recent 10 seconds of data to calculate average RR
    """
    from numpy import array
    import sys
    from peakdet import peakdet
    arr = array(moving_avg(array(WINDOW)))
    with open('test.txt', 'w') as f:
        f.writelines([str(x) + ',' for x in arr])

    # peaks = peakutils.peak.indexes(arr, thres=0.01, min_dist=5)
    peaks = peakdet(arr, 5)
    peaks = peaks[0]
    print(peaks)
    print(len(peaks))
    sys.exit(0)  # debug exit: the RR calculation below never runs

    try:
        beats_per_second = len(peaks) / (TIMES[len(TIMES) - 1] - TIMES[0])
        print(TIMES[len(TIMES) - 1] - TIMES[0])
        RR = str(beats_per_second * SECONDS_PER_MINUTE)
    except ZeroDivisionError:
        RR = str(-1)
    return RR
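
Every example below calls a peakdet helper that returns (maxtab, mintab) arrays of [position, value] rows; the exact signature and return order vary between the source repos. For orientation, here is a minimal sketch in the style of Eli Billauer's classic detector (an assumption about what these repos ship, not their canonical code):

import numpy as np

def peakdet(v, delta, x=None):
    """Minimal Billauer-style peak detector (sketch).

    A point is reported as a maximum once the signal drops at least
    `delta` below it, and as a minimum once it rises at least `delta`
    above it. `x` optionally supplies positions; otherwise the sample
    index is used.
    """
    maxtab, mintab = [], []
    if x is None:
        x = np.arange(len(v))
    mn, mx = np.inf, -np.inf
    mnpos = mxpos = np.nan
    lookformax = True
    for i, this in enumerate(v):
        if this > mx:
            mx, mxpos = this, x[i]
        if this < mn:
            mn, mnpos = this, x[i]
        if lookformax:
            if this < mx - delta:
                maxtab.append((mxpos, mx))
                mn, mnpos = this, x[i]
                lookformax = False
        else:
            if this > mn + delta:
                mintab.append((mnpos, mn))
                mx, mxpos = this, x[i]
                lookformax = True
    return np.array(maxtab), np.array(mintab)
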
def calligraphic_fit(points, loopiness):
    """Adds `loopiness` numer of loops along the curve where the curvature is highest."""
    CURVATURE_FLAT = 1
    LOOP_SIZE = 8

    ## normalize the points so that the parameters and constants make more sense across different datasets
    points_means = points.mean(axis=0, keepdims=True)
    points_stds = points.std(axis=0, keepdims=True)
    points = (points - points_means) / points_stds

    ## fit a spline
    num_points = points.shape[0]
    u = np.linspace(0, 1, num_points)

    k = 3
    # the number of control points is about one third of the original number
    # of points; this determines the extent of the smoothing
    t = np.linspace(0, 1, num_points // 3)
    t = np.concatenate([[0] * k, t, [1] * k])

    spl = interp.make_lsq_spline(u, points, t, k=k)

    ## calc curvature and find extrema
    u2 = np.linspace(0, 1, num_points * 10)
    x = interp.splev(u2, spl)
    xdot = interp.splev(u2, spl, 1)
    xddot = interp.splev(u2, spl, 2)
    # see: https://www.math.tugraz.at/~wagner/Dreibein
    curvature = cross(xdot, xddot) / norm(xdot)**3

    mins, maxes = peakdet(curvature, delta=0)

    ## add `loopiness` number of loops where the curvature is highest
    peaks = np.concatenate([mins, maxes])
    peaks = sorted(peaks, key=lambda p: abs(curvature[p]), reverse=True)

    slices = []
    Slice = namedtuple("Slice", ["i", "j", "x"])
    for peak in peaks[:loopiness]:
        # find the first points left and right of the peak that have a low enough curvature
        for i in reversed(range(0, peak)):
            if np.sign(curvature[peak]) * curvature[i] <= CURVATURE_FLAT:
                break
        for j in range(peak + 1, len(curvature)):
            if np.sign(curvature[peak]) * curvature[j] <= CURVATURE_FLAT:
                break

        # extend the loop out from these low-curvature points
        C = line_line_intersect(x[i - 1], x[i], x[j], x[j + 1])
        # todo: maybe handle the case when there is no intersection
        loop = loop_the_loop(x[i], x[j], C, LOOP_SIZE)
        slices.append(Slice(i, j + 1, loop))

    x_new = replace_slices(x, slices)

    ## undo the normalization
    x_new = x_new * points_stds + points_means
    return x_new
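
The curvature line above implements the standard planar formula kappa = (x'y'' - y'x'') / (x'^2 + y'^2)^(3/2): the 2-D cross product of the first and second derivatives divided by the cubed speed. A self-contained numpy sketch of the same quantity, assuming the `cross` and `norm` helpers in the function act per point:

import numpy as np

def signed_curvature(xdot, xddot):
    """Signed curvature of a planar curve from per-point derivatives.

    xdot, xddot: arrays of shape (n, 2) holding first and second
    derivatives with respect to the spline parameter.
    """
    # the 2-D cross product reduces to the scalar x1*y2 - y1*x2
    cross = xdot[:, 0] * xddot[:, 1] - xdot[:, 1] * xddot[:, 0]
    speed = np.linalg.norm(xdot, axis=1)
    return cross / speed**3
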
def extract_spiketimes(neuron_type,condition,ntrial,nrun):
    '''
    Extract spike times from NEURON's voltage traces
    neuron_type: neuron population tag (determines Nneurons, the number of neurons analyzed)
    condition: Specific Lesion - e.g., Control, No_VIPcells
    ntrial: mouse ID
    nrun: number of trial/run
    '''
    dt = 0.1
    filepath = '../Simulation_Results/'+learning+'/'+condition+'/Trial_'+str(ntrial)+'/Run_'+str(nrun)
    
    # Peakdet parameters
    delta = 1
    
    if neuron_type=='_pvsoma_':
        Nneurons=130
        thres = 5 " remove spikelets
    elif neuron_type=='_bcell_':
        Nneurons=8
        thres = 0
    elif neuron_type=='_vipcck_' or neuron_type=='_vipcrnvm_':
        Nneurons=1
        thres = 0
    elif neuron_type=='_vipcr_':
        Nneurons=4
        thres = 0        
    else:
        Nneurons=2
        thres = 0
        
    print Nneurons
    spiketimes_all=[]
    for n_neuron in xrange(Nneurons):

        
        filename = filepath+'/Trial_'+str(ntrial)+'_Run_'+str(nrun)+neuron_type+str(n_neuron)+'.dat'
    
        
        data = np.loadtxt(filename)
        # remove the first 400ms
        data = data[int(400/dt):]

        maxtab, mintab = peakdet(data, delta, thres)
        if maxtab.size!=0:    
            spiketimes = [int(i*dt) for i in maxtab[:,0]]
        else:
            spiketimes=[]
        spiketimes.insert(0,str(n_neuron))
        
        spiketimes_all.append(spiketimes)
    
    #Saves the list in a pickle file at the specified Run directory
    filewrite = filepath + '/spiketimes'+ neuron_type +'.pkl'
    with open(filewrite, 'wb') as handle:
        pickle.dump(spiketimes_all, handle, protocol=pickle.HIGHEST_PROTOCOL)
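
Note the three-argument call peakdet(data, delta, thres): in Billauer's original signature the third argument is a vector of x positions, so this is presumably a repo-local variant in which it acts as an amplitude threshold. With only the two-argument version, the same effect is a post-filter (a sketch using the function's own variables):

maxtab, mintab = peakdet(data, delta)
# keep only peaks whose amplitude clears the threshold (drops spikelets)
if maxtab.size:
    maxtab = maxtab[maxtab[:, 1] > thres]
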
Example #4
def trace_crown_contour(tooth):
  '''
  Trace the contour of the crown part of a tooth ROI.
  '''
  height, width, _ = tooth.shape

  # crop to the top third of the ROI (the crown region)
  crown = tooth[0:height//3,:,:]

  # make a grayscale histogram from the crown part of the ROI
  histogram = cv2.calcHist([crown], [0], None, [256], [0,255])

  # flatten and normalize histogram
  histogram = list(chain.from_iterable(histogram))
  histogram = np.asarray(histogram) / sum(histogram)

  # find first peak (bin=mu), take the highest point, take 10% of it as a drop
  top = max(histogram)
  peaks, _ = peakdet(histogram, top * 0.1)

  mu = int(peaks[0][0])

  # determine sigma in function of the height at the highest point
  sigma = 1/(math.sqrt(2*math.pi)*(histogram[mu]))

  # compute Gaussian representation
  gaussian = mlab.normpdf(np.arange(len(histogram)), mu, sigma)

  # trace
  contour = []

  # crown center
  center = [ int(width/2), int(height/3) ]

  # for every angle in first two quadrants, find contour point
  for angle in np.arange(0, math.pi, 0.05):
    # construct vector with intensities along a line from the center
    # first get the x and y coordinates
    (x, y) = get_pixels_along_line(center, -angle, (height,width))

    # extract the intensities
    I = tooth[y,x,1]
    
    # convert to probabilities
    P = determine_contour_probabilities(I, histogram, gaussian)

    # highest probability is contour point
    contour_index = np.argmax(P)
    contour_point = [int(x[contour_index]), int(y[contour_index])]

    # add it to the contour
    contour.append(contour_point)
    
  return contour, histogram, mu, sigma
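
The sigma line above inverts the peak height of a normal density: a Gaussian attains its maximum 1/(sigma*sqrt(2*pi)) at x = mu, so matching that height to the histogram value p(mu) gives sigma = 1/(sqrt(2*pi)*p(mu)). A quick check of the identity (scipy.stats.norm stands in for the long-removed mlab.normpdf; the height value is hypothetical):

import math
from scipy.stats import norm

height = 0.05  # hypothetical histogram value at the peak bin, p(mu)
sigma = 1 / (math.sqrt(2 * math.pi) * height)
# the reconstructed Gaussian peaks at exactly the observed height
assert abs(norm.pdf(0, loc=0, scale=sigma) - height) < 1e-12
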
Example #5
def find_valleys(histogram, drop_pct, center):
  '''
  Given a histogram, finds the valleys and also returns the valley closest to
  the center.
  @param histogram
  @param drop_pct percentage of drop before a dip is considered a valley
  @param center value to which the closest valley is considered the central one
  @return valleys and the index of the center valley
  '''
  _, valleys = peakdet(histogram, drop_pct)
  centers = min(range(len(valleys)), key=lambda i: abs(valleys[i,0]-center))
  return (valleys, centers)
    def findTurningPoints(self):
        Rhip = self.getKeypointSelection('Rhip')
        Lhip = self.getKeypointSelection('Lhip')
        hips = pd.concat([Rhip, Lhip], axis=1, sort=False)
        hips['center'] = hips['X_rot'].mean(axis=1)
        x = hips['center'].shift(-7).rolling(15).median()

        xt = x.interpolate()
        maxtab, mintab = peakdet(xt, 100)
        minPeaks = []
        b, a = np.argmin(mintab, axis=0)
        minPeaks.append(mintab[a][0].astype(int))
        mintab = np.delete(mintab, a, 0)
        b, a = np.argmin(mintab, axis=0)
        minPeaks.append(mintab[a][0].astype(int))
        minPeaks = sorted(minPeaks)
        # pick the maximum peak closest to index 600 (used as the middle turning point)
        offsetMiddlePeak = 1000
        for i in range(0, maxtab.shape[0]):
            if abs(600 - maxtab[i][0]) < offsetMiddlePeak:
                offsetMiddlePeak = abs(600 - maxtab[i][0])
                middlePeak = maxtab[i][0]

        middlePeak = middlePeak.astype(int)

        flagXValue = hips.iloc[middlePeak].center
        # find start by walking back from first valley
        self.startRunOne = 0
        for i in range(minPeaks[0], 0, -1):
            searchX = hips.iloc[i].center
            if (searchX > flagXValue) & (i < 250):
                self.startRunOne = hips.iloc[i].frameIndex.values[0]
                break
        # find end by walking forward from last valley
        self.endRunFour = 1200
        for i in range(minPeaks[1], hips.shape[0]):
            searchX = hips.iloc[i].center
            if (searchX > flagXValue) & (i > 800):
                self.endRunFour = hips.iloc[i].frameIndex.values[0]
                break

        self.endRunOne = int(hips.iloc[minPeaks[0]].frameIndex.values[0])
        self.endRunTwo = int(hips.iloc[middlePeak].frameIndex.values[0])
        self.endRunThree = int(hips.iloc[minPeaks[1]].frameIndex.values[0])
        self.endRunFour = int(self.endRunFour)
        self.startRunOne = int(self.startRunOne)
        self.startRunTwo = int(self.endRunOne + 1)
        self.startRunThree = int(self.endRunTwo + 1)
        self.startRunFour = int(self.endRunThree + 1)
        self.startIndex = 6
        self.startFrame = int(self.startRunOne)
        self.endFrame = int(self.endRunFour)
        self.startXVal = int(hips.iloc[(self.startRunTwo)].center)
        self.endXVal = hips.iloc[(self.endRunTwo)].center
Example #7
def calc_rr():
    """
    Uses most recent 10 seconds of data to calculate average RR
    """
    from numpy import array
    from peakdet import peakdet
    arr = array(moving_avg(array(WINDOW)))
    peaks = peakdet(arr, 1)
    peaks = peaks[0]
    print(len(peaks))
    try:
        beats_per_second = len(peaks) / (TIMES[len(TIMES) - 1] - TIMES[0])
        RR = str(beats_per_second * SECONDS_PER_MINUTE)
    except ZeroDivisionError:
        RR = str(-1)
    return RR
ax.add_artist(circ1)
plt.plot(0,0,'.',color='gray')
plt.axis('equal')
plt.xlim((-30,30))
plt.ylim((-30,30))
plt.xlabel('Horizontal position (cm)')
plt.ylabel('Vertical position (cm)')
plt.show()

##
#Nystagmus detection
#Find peaks, min and max
dp = 30 # distance between peaks
if rot_speed > dp:
    dp = rot_speed-10
peaks = peakdet.peakdet(w,dp)

maxes = peaks[0].T[0].astype(int)
mins = peaks[1].T[0].astype(int)

#locations = mins
CW = np.where(T[mins]<410)
CCW = np.where(T[mins]>410)
locations = list(mins[CW])+list(mins[CCW])
#locations

#TODO: change with rotation direction
#maxes == CCW
#mins == CW

slopes = []
Example #10
def extract_spiketimes(neuron_type, condition, ntrial, nrun):
    '''
    Extract spike times from NEURON's voltage traces
    neuron_type: neuron population tag (determines Nneurons, the number of neurons analyzed)
    condition: Specific Lesion
    ntrial: mouse ID
    nrun: number of trial/run
    '''
    dt = 0.1
    filepath = '../' + condition + '/Trial_' + str(ntrial) + '/Run_' + str(
        nrun)

    # Peakdet parameters
    delta = 1

    if neuron_type == '_pvsoma_':
        Nneurons = 130
        thres = 0
    elif neuron_type == '_bcell_':
        Nneurons = 8
        thres = 0
    elif neuron_type == '_vipcck_' or neuron_type == '_vipcrnvm_':
        Nneurons = 1
        thres = 0
    elif neuron_type == '_vipcr_':
        Nneurons = 4
        thres = 0
    else:
        Nneurons = 2
        thres = 0

    print Nneurons
    spiketimes_all = []
    for n_neuron in xrange(Nneurons):

        filename = filepath + '/Trial_' + str(ntrial) + '_Run_' + str(
            nrun) + neuron_type + str(n_neuron) + '.dat'

        path = np.loadtxt(
            '../make_inputs_linear_track/runs_produced_by_python_ec_rand_stops/run_'
            + str(nrun) + '/path.txt',
            'int',
            delimiter=' ')
        tot_time = np.sum(np.bincount(path[:, 0]))  # in ms

        data = np.loadtxt(filename)
        # remove the first 400ms
        data = data[int(400 / dt):int(tot_time / dt) + 1]

        maxtab, mintab = peakdet(data, delta, thres)
        if maxtab.size != 0:
            spiketimes = [round(float(i * dt), 1) for i in maxtab[:, 0]]
        else:
            spiketimes = []
        spiketimes.insert(0, str(n_neuron))

        spiketimes_all.append(spiketimes)

    #Saves the list in a pickle file at the specified Run directory
    filewrite = filepath + '/spiketimes' + neuron_type + '.pkl'
    with open(filewrite, 'wb') as handle:
        pickle.dump(spiketimes_all, handle, protocol=pickle.HIGHEST_PROTOCOL)
Example #11
    def find_joints_from_intensities(self, intensities, wSize, maxJoints):

        nI = len(intensities)

        wSizeHalf = wSize / 2

        sumDiffArr = np.zeros(nI)
        avgDiffArr = 0
        count = 0
        max = 0
        for i in range(wSizeHalf, nI - wSizeHalf):
            w = intensities[i - wSizeHalf:i + wSizeHalf]
            dw = np.diff(w)
            dAccum = 0
            for dwi in dw:
                dAccum = dAccum + abs(dwi)

            sumDiffArr[i] = dAccum
            avgDiffArr = avgDiffArr + dAccum
            count = count + 1
            if max < dAccum:
                max = dAccum

        if count == 0:
            print "find_joints_from_intensities to small fingerline error"
            return []

        avgDiffArr = avgDiffArr / count
        peakThreshold = (max - avgDiffArr) / 3.0
        print "Joint Detection DiffWin Threshold %d " % peakThreshold

        modPeaks, valeys = peakdet.peakdet(
            sumDiffArr[wSizeHalf:nI - wSizeHalf], peakThreshold)
        # correct peak index
        peaks = []
        for pk in modPeaks:
            peaks.append([pk[0] + wSizeHalf, pk[1]])

        # filter peaks

        # remove first peak if too close to beginning
        if len(peaks) > 0:
            if peaks[0][0] < nI * 0.08:
                peaks.remove(peaks[0])

        # remove peaks if too close together
        minDist = nI * 0.08
        lastPeakPos = -100
        for pk in list(peaks):  # iterate over a copy so removal is safe
            distToPrevious = pk[0] - lastPeakPos
            if distToPrevious < minDist:
                peaks.remove(pk)
            else:
                lastPeakPos = pk[0]

        # just remove all peaks more than maxJoints :))
        if len(peaks) > maxJoints:
            peaks = peaks[0:maxJoints]

        # remove this early return if you want to see the graphs below
        return peaks

        peakTable = np.zeros(nI)
        for pk in peaks:
            peakTable[int(pk[0])] = pk[1]

        if (self.verbosity > 0):
            plt.plot(intensities)
            plt.plot(sumDiffArr)
            plt.plot(peakTable)
            plt.show()

        return peaks
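
The double loop above computes, per window position, the sum of absolute first differences inside the window. A vectorized equivalent is a box convolution over |diff| (a sketch, not the original code; edge alignment differs slightly from the loop version):

import numpy as np

def diff_energy(intensities, wSize):
    """Sum of |first differences| over a sliding window of wSize samples."""
    d = np.abs(np.diff(intensities))  # wSize samples -> wSize - 1 diffs
    return np.convolve(d, np.ones(wSize - 1), mode='same')
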
Example #12
def getI32(file_name, numberOfSlices, numberOfAllRepitionsParTable):

    #fileSize = fileInfo.bytes/4
    fid = open(file_name, 'rb')  # binary mode for np.fromfile

    fileTable = np.fromfile(fid, dtype=np.float32)

    #fpRawTime	= fileTable[0:len(fileTable):4]
    fpRawResp = fileTable[1:len(fileTable):4]
    fpRawTrig = fileTable[2:len(fileTable):4]
    fpRawCard = fileTable[3:len(fileTable):4]

    # Process Respiration Data
    # smooth with a 10-sample moving average
    N = 10
    RespBLC = np.convolve(fpRawResp, np.ones((N, )) / N, mode='same')

    # remove baseline shift (subtract the median)
    RespBLC = RespBLC - np.median(RespBLC, axis=None)

    # derivative of respiration
    kernel = [1, 0, -1]
    RespDeriv = np.convolve(RespBLC, kernel, mode='same')
    #RespDeriv = np.gradient(RespBLC)
    # peak detection
    pksRespMax, pksResMin = pk.peakdet(RespBLC * 20, delta=1)
    print('avg. Respiration Rate: ' +
          str(len(pksRespMax) / (len(RespBLC) / 60000)) + ' 1/min')

    # Process Cardiac Data
    fs = 1000  # Sampling Frequency(1 kHz)
    lowcut = 2.5
    highcut = 10.0
    nyq = 0.5 * fs  # Nyquist Frequency (Hz)
    Wp = [lowcut / nyq,
          highcut / nyq]  # Passband Frequencies (Normalised 2.5 - 10 Hz)
    Ws = [0.1 / nyq, 35 / nyq]  # Stopband Frequencies (Normalised)
    Rs = 40  # Stopband attenuation (dB)
    N = 3  # Filter order
    b, a = sc.cheby2(N, Rs, Ws, btype='bandpass')
    filtCardBLC = sc.filtfilt(b, a, fpRawCard)

    N = 10
    CardBLC = np.convolve(filtCardBLC, np.ones((N, )) / N, mode='same')

    # remove baseline shift (subtract the median)
    CardBLC = CardBLC - np.median(CardBLC, axis=None)

    # derivative of cardiac signal
    kernel = [1, 0, -1]
    CardDeriv = np.convolve(CardBLC, kernel, mode='same')

    # peak detection
    pksCardpMax, pksCardMin = pk.peakdet(CardBLC * 20, delta=1)
    print('avg. Card Rate: ' + str(len(pksCardpMax) / (len(CardBLC) / 60000)) +
          ' 1/min')

    # if the trigger max is not equal 1 but higher
    if max(fpRawTrig) != 1.0:
        fpRawTrig = fpRawTrig - (max(fpRawTrig) - 1)

    # find missing trigger and replace 1 by 0
    idx_missedTrigger = np.where(np.diff(fpRawTrig, 2) == 2)[0] + 1
    if len(idx_missedTrigger) > 0:
        fpRawTrig[idx_missedTrigger + 1] = 0

    triggerDataPoints = np.argwhere(fpRawTrig == 0)
    numberOfTiggers = len(triggerDataPoints)
    numberOfRepitions = numberOfTiggers / (numberOfSlices * 2)
    print('Number of Repetitions: ' + str(numberOfRepitions))

    # if two datasets are stored in a single i32 file
    if numberOfTiggers >= (
        (numberOfAllRepitionsParTable + 5) * numberOfSlices * 2) * 2:
        triggerDataPoints = triggerDataPoints[:int(
            numberOfTiggers /
            2)]  # if more than two dataset in a single i32 file

    old_numberOfTriggers = numberOfTiggers
    # corrected number of triggers
    numberOfTiggers = len(triggerDataPoints)

    # if some wrong triggers in i32 file
    if numberOfTiggers > (numberOfAllRepitionsParTable +
                          5) * numberOfSlices * 2:
        wrongAmountOfTriggers = numberOfTiggers - (
            numberOfAllRepitionsParTable + 5) * numberOfSlices * 2
        fpRawTrig_cut = fpRawTrig[wrongAmountOfTriggers * 100 - 1::]
        triggerDataPoints = np.argwhere(fpRawTrig_cut == 0)
        numberOfTiggers = len(triggerDataPoints)
        numberOfRepitions = numberOfTiggers / (numberOfSlices * 2)
        print('Number of Repetitions: ' + str(numberOfRepitions))

    triggerDataPoints_1st = triggerDataPoints[numberOfSlices * 5 *
                                              2:numberOfTiggers:2, 0]
    triggerDataPoints_2nd = triggerDataPoints[numberOfSlices * 5 * 2 +
                                              1:numberOfTiggers:2, 0]
    usedTriggerAmount = (
        (numberOfAllRepitionsParTable + 5) * numberOfSlices * 2 -
        5 * 2 * numberOfSlices) / 2

    if not len(triggerDataPoints_1st) == len(triggerDataPoints_2nd):
        print('Missed one trigger in %s' % file_name)
        if len(triggerDataPoints_1st) < usedTriggerAmount:
            if len(triggerDataPoints_2nd) == usedTriggerAmount:
                triggerDataPoints_1st = triggerDataPoints_2nd
            else:
                sys.exit('Trigger does not relate to any slice or rep. Time')

    if len(RespBLC) == len(CardBLC):
        i32Table = np.zeros([len(RespBLC), 4])
    else:
        sys.exit('Respiration and Cardiac Data do not have the same length!')

    i32Table[:, 0] = RespBLC
    i32Table[:, 1] = RespDeriv
    i32Table[:, 2] = CardBLC
    i32Table[:, 3] = CardDeriv

    return triggerDataPoints_1st, i32Table
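
On the filter design inside getI32: for scipy.signal.cheby2 the second argument is the minimum stopband attenuation in dB and the frequency argument gives the stopband edges normalised to Nyquist, which is why the code passes Ws rather than the unused passband Wp. A standalone sketch of the cardiac band-pass stage with the same parameter values (the random input is a stand-in for fpRawCard):

import numpy as np
from scipy import signal

fs = 1000.0                   # sampling frequency, Hz
nyq = 0.5 * fs
Ws = [0.1 / nyq, 35 / nyq]    # stopband edges (normalised)
Rs = 40                       # minimum stopband attenuation, dB
b, a = signal.cheby2(3, Rs, Ws, btype='bandpass')

raw = np.random.randn(5000)             # stand-in for fpRawCard
filtered = signal.filtfilt(b, a, raw)   # zero-phase filtering
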
Example #13
import pandas as pd
import numpy as np
import math
from peakdet import peakdet

df = pd.read_csv('P07-Actigraph-0_9-ms-v2-start-stop-sum.csv', header=0)

maxtab, mintab = peakdet(df['FILTER'], .1)

ymax, ymin = peakdet(df['Axis2'], .1)
zmax, zmin = peakdet(df['Axis3'], .1)

print 'FILTER Max length: ' + str(len(maxtab))
print 'FILTER Min length: ' + str(len(mintab))

print 'Z max length: ' + str(len(zmax))
print 'Z min length: ' + str(len(zmin))

print 'Y max length: ' + str(len(ymax))
print 'Y min length: ' + str(len(ymin))

zsum = 0
for item in zmax:
    zsum += item[1]

zavg = zsum / len(zmax)

print 'Z Avg: ' + str(zavg)

ysum = 0
def cwesr_fit(x, y):
    [maxtab, mintab] = peakdet.peakdet(y, 5000, x)
    #    indexes = peakutils.indexes(-y, thres=0.4, min_dist=4)
    sm1 = []
    sm2 = []
    #    if len(indexes) > 0:
    #        mintab = np.transpose([x[indexes], y[indexes]])
    for i in range(0, len(mintab)):
        if mintab[i][0] < 2878:
            sm1.append(mintab[i])
        else:
            sm2.append(mintab[i])
    #    mintabs=sorted(mintab, key=lambda x: x[1])
    sm1s = sorted(sm1, key=lambda x: x[1])
    sm2s = sorted(sm2, key=lambda x: x[1])

    #    if len(mintabs) >= 2:
    #        fc1=mintabs[0][0]
    #        fc2 = mintabs[1][0]
    #    elif len(mintabs)==1:
    #        fc1=mintabs[0][0]
    #        fc2=mintabs[0][0]
    #    else:
    #        fc1 = defaultf1
    #        fc2 = defaultf2

    if len(sm1s) >= 1 and len(sm2s) >= 1:
        fc1 = sm1s[0][0]
        fc2 = sm2s[0][0]
    elif len(sm1s) == 0 and len(sm2s) >= 1:
        fc1 = sm2s[0][0]
        fc2 = sm2s[0][0]
    elif len(sm1s) >= 1 and len(sm2s) == 0:
        fc1 = sm1s[0][0]
        fc2 = sm1s[0][0]
    else:
        fc1 = defaultf1
        fc2 = defaultf2

    #if len(sm1s) >= 1 and len(sm2s) >= 1:
    #    guess = [edata[1,1], fc1, gamp, gwidth, fc2, gamp, gwidth]
    #elif len(sm1s) >= 0 and len(sm2s) >= 1:
    #    guess = [edata[1,1], fc1, gamp, gwidth]
    #elif len(sm1s) >= 1 and len(sm2s) >= 0:
    #    guess = [edata[1,1], fc1, gamp, gwidth]
    #else:

    guess = [y[0], fc1, gamp, gwidth, fc2, gamp, gwidth]
    #    guess = [y[1], fc1, gamp, gwidth]

    try:
        #    if len(sm) >= 4:
        #        popt, pcov = curve_fit(func, x, y, p0=guess, bounds=(lbounds4,ubounds4))
        #    elif len(sm) == 3:
        #        popt, pcov = curve_fit(func, x, y, p0=guess, bounds=(lbounds3,ubounds3))
        #    if len(sm1s) >= 1 and len(sm2s) >= 1:
        #        popt, pcov = curve_fit(func, x, y, p0=guess, bounds=(lbounds2,ubounds2))
        #    elif len(sm1s) == 0 and len(sm2s) >= 1:
        #        popt, pcov = curve_fit(func, x, y, p0=guess, bounds=(lbounds1,ubounds1))
        #    elif len(sm1s) >= 1 and len(sm2s) == 0:
        #        popt, pcov = curve_fit(func, x, y, p0=guess, bounds=(lbounds1,ubounds1))
        #    else:
        popt, pcov = curve_fit(func,
                               x,
                               y,
                               p0=guess,
                               bounds=(lbounds2, ubounds2))
#        popt, pcov = curve_fit(func, x, y, p0=guess)
    except Exception:
        popt = [0, 0, 0, 10, 1e3, 0, 10]
        pcov = np.zeros((7, 7))
        print('fit fail')

    fit = func(x, *popt)
    fitg = func(x, *guess)

    return popt, pcov, fit, fitg, np.transpose(mintab)
Example #16
                tick.label.set_fontsize(14)
                for tick in ax.yaxis.get_major_ticks()
            ]
            zed = [
                tick.label.set_fontsize(14)
                for tick in ax.xaxis.get_major_ticks()
            ]
            ax.plot([5, 5], [0, 20], linewidth=1.6, color='blue')
            ax.plot([5, 105], [0, 0], linewidth=1.6, color='blue')
            plt.legend()

        thres = -5
        if iname == 'Pyramidal':
            thres = 5

        maxtab, mintab = peakdet(V_soma, 1, thres)

        duration = 1000
        start = int(499 / dt)
        end = int((499 + duration) / dt)

        if i == -0.1:
            DV = (V_soma[end] - V_soma[start]) * mV
            DVmax = (min(V_soma[start:end]) - V_soma[start]) * mV
            sag_ratio.append(DV / DVmax)
            Rin.append(DV / (i * nA))  # mega Ohm
            V_peak.append(V_soma[end] - V_soma[start])
            Iinj.append(i)
        elif i > 0:
            frequency.append(len(maxtab))
            Iinj2.append(i)