def get_response(sr, audio, f, bw=0, damping=0.0001):
    """ Get DetectorBank output
    
        Parameters
        ----------
        sr : int
            Sample rate
        audio : array
            Input samples
        f : one-element array
            Frequency to test, in an array
        bw : float
            Bandwidth. Default is minimum bandwidth
        damping : float
            Damping factor. Default is 0.0001

        Returns
        -------
        1D array of |z| values from the single detector
    """
    
    method = DetectorBank.runge_kutta
    f_norm = DetectorBank.freq_unnormalized
    a_norm = DetectorBank.amp_unnormalized
    gain = 1
    
    bandwidth = np.full(len(f), bw)
    det_char = np.array(list(zip(f, bandwidth)))
     
    z = np.zeros((len(f),len(audio)), dtype=np.complex128)    
    r = np.zeros(z.shape) 
    
    det = DetectorBank(sr, audio, 4, det_char, method|f_norm|a_norm, damping, 
                       gain)
    
    det.getZ(z) 
    det.absZ(r, z)
    
    return r[0]
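
# Hypothetical usage sketch (not part of the original example): assumes numpy is
# imported as np and DetectorBank is importable, as in the function above.
sr = 48000
dur = 2
t = np.linspace(0, 2 * np.pi * 440 * dur, sr * dur)
test_audio = np.sin(t).astype(np.float32)
# single minimum-bandwidth detector centred on 440Hz
resp = get_response(sr, test_audio, np.array([440.0]))
print('Peak |z|: {:.4f}'.format(resp.max()))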
Example #2
def plot_complex(f0, a_norm, do_plots):

    sr = 48000

    dur = 3
    t = np.linspace(0, 2 * np.pi * f0 * dur, sr * dur)
    audio = np.sin(t)
    audio = np.append(audio, np.zeros(sr))

    method = DetectorBank.runge_kutta
    f_norm = DetectorBank.freq_unnormalized

    d = 0.0001
    gain = 5
    f = np.array([f0])
    bandwidth = np.zeros(len(f))
    det_char = np.array(list(zip(f, bandwidth)))

    det = DetectorBank(sr, audio.astype(np.float32), 4, det_char,
                       method | f_norm | a_norm, d, gain)

    z = np.zeros((len(f), len(audio)), dtype=np.complex128)
    det.getZ(z)

    r = np.zeros(z.shape)
    det.absZ(r, z)

    # number of oscillations to plot
    nOsc = 5
    # signal may have been modulated by DetectorBank
    # detFreq is the frequency the detector is actually operating at, therefore
    # the frequency of the orbits
    detFreq = det.getW(0) / (2 * np.pi)
    # number of samples in nOsc
    sOsc = int(nOsc / detFreq * sr)

    t0 = (dur * sr) - sOsc  # int(0.0 * sr)
    t1 = dur * sr  # int(nOsc/f[0] * sr)

    x = z[0].real[t0:t1]
    y = z[0].imag[t0:t1]

    c = ['darkmagenta']

    a, b = getEllipseParams(x, y)
    e = np.sqrt(a**2 - b**2) / a

    if do_plots:
        plt.plot(x, y, c[0])
        plt.grid()

        plt.title('{}Hz'.format(f0))
        plt.xlabel('real')
        plt.ylabel('imag')

        plt.show()
        plt.close()

    return e
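
# Hypothetical usage sketch (assumes matplotlib, numpy and the getEllipseParams
# helper used above are available): compare orbit eccentricity with and without
# amplitude normalisation for a 440Hz detector, without showing the plots.
e_unnorm = plot_complex(440, DetectorBank.amp_unnormalized, do_plots=False)
e_norm = plot_complex(440, DetectorBank.amp_normalized, do_plots=False)
print('Eccentricity (unnormalised): {:.4f}'.format(e_unnorm))
print('Eccentricity (normalised):   {:.4f}'.format(e_norm))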
Example #3
    def _get_maxima(self, progress):
        # make DetectorBank, get responses and return max value in response

        size = len(self.f)

        maxima = np.zeros(size)

        # don't need all responses at once - just get max for each frequency
        # run in blocks of numPerRun channels to try and speed it up a bit

        numPerRun = 100

        numBlocks = int(np.ceil(size / numPerRun))
        count = 0

        i0, i1 = 0, 0

        more = True
        while more:
            i0 = i1
            i1 += numPerRun
            if i1 >= size:
                i1 = size
                more = False

            if progress:
                print('Running block {} of {}'.format(count + 1, numBlocks),
                      end='\r')
#                print('{:.0f}%'.format(100*(i0/size)), end='\r')

            freq = self.f[i0:i1]
            bandwidth = np.zeros(len(freq))
            det_char = np.column_stack((freq, bandwidth))

            det = DetectorBank(self.sr, self.audio.astype(np.float32), 4,
                               det_char, *self.dbp)

            # get output
            z = np.zeros((len(freq), len(self.audio)), dtype=np.complex128)
            r = np.zeros(z.shape)
            det.getZ(z)
            det.absZ(r, z)

            for n in range(len(freq)):

                if np.isinf(np.max(r[n])):
                    file = 'debug/{:.0f}Hz.csv'.format(freq[n])
                    np.savetxt(file, r[n], delimiter='\t')

                maxima[i0 + n] = np.max(r[n])

            count += 1

#            maxima[n] = m

        return maxima
Example #4
def absZ_test(sr):
    """Feed a signal at given sample rate
    to the detectorbank and take the absolute value
    of the response.
    
    Return the result, and max error comparing
    the absZ method results and numpy.abs() results."""
    f = np.array([200])
    t = np.linspace(0, f[0]*2*np.pi, sr)
    audio = np.sin(t)
    #audio = np.append(audio, np.zeros(sr))

    z = np.zeros((len(f),len(audio)), dtype=np.complex128)
    method = DetectorBank.runge_kutta
    f_norm = DetectorBank.freq_unnormalized
    a_norm = DetectorBank.amp_normalized
    d = 0.0001
    bandwidth = np.zeros(len(f))
    det_char = np.array(list(zip(f, bandwidth)))
    gain = 25
    det = DetectorBank(sr, audio.astype(np.float32), 4, det_char, 
                    method|f_norm|a_norm, d, gain)

    det.getZ(z)

    r = np.zeros(z.shape)
    _ = det.absZ(r, z)

    return r, np.amax(np.abs(r-np.abs(z)))
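
# Hypothetical usage sketch: check absZ() against numpy.abs() at a few common
# sample rates (assumes numpy and DetectorBank are imported as above).
for test_sr in (44100, 48000, 96000):
    _, err = absZ_test(test_sr)
    print('sr = {}: max |absZ - np.abs| error = {:.2e}'.format(test_sr, err))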
    def _get_abs_z(self, freq, bandwidth, dbp):
        # make a DetectorBank with the given parameters and return |z|

        size = len(freq)
        bw = np.zeros(size)
        bw.fill(bandwidth)

        det_char = np.array(list(zip(freq, bw)))

        det = DetectorBank(self.sr, self.audio.astype(np.float32), 0, det_char,
                           *dbp)

        # get output
        z = np.zeros((size, len(self.audio)), dtype=np.complex128)
        r = np.zeros(z.shape)
        det.getZ(z, size)
        det.absZ(r, z)

        return r
Example #6
def get_responses(sr, audio, f):
    
    method = DetectorBank.runge_kutta
    f_norm = DetectorBank.freq_unnormalized
    a_norm = DetectorBank.amp_normalized
    gain = 25
    d = 0.0001
    bandwidth = np.zeros(len(f))
    #bandwidth.fill(5)  # uncomment for degenerate detectors
    det_char = np.array(list(zip(f, bandwidth)))
     
    z = np.zeros((len(f),len(audio)), dtype=np.complex128)    
    r = np.zeros(z.shape) 
    
    det = DetectorBank(sr, audio, 4, det_char, method|f_norm|a_norm, d, gain)
    
    det.getZ(z) 
    det.absZ(r, z)
    
    return r
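
# Hypothetical usage sketch (assumes numpy imported as np): two minimum-bandwidth
# detectors fed a two-tone signal; each row of the result is one detector's |z|.
sr = 48000
dur = 2
t = np.linspace(0, dur, sr * dur)
test_audio = (0.5 * np.sin(2 * np.pi * 440 * t)
              + 0.5 * np.sin(2 * np.pi * 660 * t)).astype(np.float32)
resp = get_responses(sr, test_audio, np.array([440.0, 660.0]))
print(resp.shape)  # (2, len(test_audio))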
Example #7
def getResponses(sr, mode, method, f_norm):
    """ Get DetectorBank responses
    
        Parameters
        ----------
        sr : int
            sample rate
        mode : {'a', 'low', 'high'}
            frequency range
        method : DetectorBank Feature
            DetectorBank numerical method
        f_norm : DetectorBank Feature
            DetectorBank frequency normalisation
            
        Returns
        -------
        2D array of |z| values
    """

    f = getFrequencies(mode)

    audio = make_audio(f, np.ones(len(f)), sr)

    d = 0.0001
    gain = 5
    a_norm = DetectorBank.amp_unnormalized
    bandwidth = np.zeros(len(f))
    det_char = np.array(list(zip(f, bandwidth)))

    z = np.zeros((len(f), len(audio)), dtype=np.complex128)
    r = np.zeros(z.shape)

    det = DetectorBank(sr, audio.astype(np.float32), 4, det_char,
                       method | f_norm | a_norm, d, gain)

    det.getZ(z)
    det.absZ(r, z)

    return r
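
# Hypothetical usage sketch: requires the getFrequencies and make_audio helpers
# assumed by the function above.
r = getResponses(48000, 'low', DetectorBank.runge_kutta,
                 DetectorBank.freq_unnormalized)
print('{} detectors, {} samples'.format(*r.shape))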
    def _get_abs_z(self, freq, flc):
        # make a DetectorBank with the given parameters and return |z|

        size = len(freq)
        bw = np.zeros(size)
        bw.fill(flc)

        det_char = np.array(list(zip(freq, bw)))

        # NB have changed DetectorBank code to regard det_char as freq,fLc
        # pairs instead of freq,bw pairs
        # all that is required to do this is change AbstractDetector::getLyapunov
        # to simply return the value it is passed
        # i.e. we pretend that 'bandwidth' values are Lyapunov coeff
        det = DetectorBank(self.sr, self.audio.astype(np.float32), 0, det_char,
                           *self.dbp)

        # get output
        z = np.zeros((size, len(self.audio)), dtype=np.complex128)
        r = np.zeros(z.shape)
        det.getZ(z, size)
        det.absZ(r, z)

        return r
Example #9
def getMax(f0, sr, method, f_norm):
    """ Get maximum abs value in response. Also returns internal detector
        frequency (which will be different from f0 if search normalisation is
        used)
        
        Parameters
        ----------
        f0 : float
            Centre frequency
        sr : int
            Sample rate
        method : DetectorBank Feature
            Numerical method
        f_norm : DetectorBank Feature
            Frequency normalisation
    
        Returns
        -------
        z value at the sample where |z| is maximal, the maximum value of |z|,
        and the frequency used by the detector
    """

    f = np.array([f0])
    b = np.zeros(len(f))
    det_char = np.array(list(zip(f, b)))
    d = 0.0001
    amp = 5
    a_norm = DetectorBank.amp_unnormalized

    audio = make_audio(f0, 1, 4, sr)

    det = DetectorBank(sr, audio.astype(np.float32), 4, det_char,
                       method | f_norm | a_norm, d, amp)

    z = np.zeros((len(f), len(audio)), dtype=np.complex128)
    r = np.zeros(z.shape)

    det.getZ(z)
    m = det.absZ(r, z)

    i = np.where(r[0] == m)[0][0]
    mz = z[0][i]

    f_adjusted = det.getW(0) / (2 * np.pi)

    return mz, m, f_adjusted
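
# Hypothetical usage sketch: requires the make_audio helper assumed by the
# function above. Compares the requested and internal detector frequencies.
mz, m, f_adj = getMax(1000, 48000, DetectorBank.runge_kutta,
                      DetectorBank.freq_unnormalized)
print('max |z| = {:.4f} at z = {}'.format(m, mz))
print('detector frequency: {:.2f}Hz'.format(f_adj))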
    def _get_max(self, f, dbp):
        # make DetectorBank, get responses and return max value in response

        f = np.array([f])
        size = len(f)
        bandwidth = np.zeros(size)
        det_char = np.array(list(zip(f, bandwidth)))

        det = DetectorBank(self.sr, self.audio.astype(np.float32), 0, det_char,
                           *dbp)

        # get output
        z = np.zeros((size, len(self.audio)), dtype=np.complex128)
        r = np.zeros(z.shape)
        det.getZ(z, size)
        m = det.absZ(r, z)

        return m
Example #11
def getLyapunov(fd, f0, sr, plot=True, b=None):

    freq = np.array([f0 - fd, f0, f0 + fd])

    if b is None:
        b = get_b(2 * fd)
    print('Frequency difference: {}Hz; Bandwidth: {}Hz'.format(fd, 2 * fd))
    print('First Lyapunov coefficient: {:.3f}'.format(b))

    method = DetectorBank.runge_kutta
    f_norm = DetectorBank.freq_unnormalized
    a_norm = DetectorBank.amp_unnormalized
    gain = 1
    damping = 0.0001
    dbp = (method | f_norm | a_norm, damping, gain)

    audio = make_input(f0, sr, 3)
    audio *= 25

    size = len(freq)
    bw = np.zeros(size)
    bw.fill(b)

    det_char = np.array(list(zip(freq, bw)))

    # NB have changed DetectorBank code to regard det_char as freq,fLc
    # pairs instead of freq,bw pairs
    # all that is required to do this is change AbstractDetector::getLyapunov
    # to simply return the value it is passed
    # i.e. we pretend that 'bandwidth' values are Lyapunov coeff
    det = DetectorBank(sr, audio.astype(np.float32), 0, det_char, *dbp)

    # get output
    z = np.zeros((size, len(audio)), dtype=np.complex128)
    r = np.zeros(z.shape)
    det.getZ(z, size)
    det.absZ(r, z)

    t = np.linspace(0, r.shape[1] / sr, r.shape[1])
    c = ['blue', 'darkmagenta', 'red']

    mx_centre = np.max(r[np.where(freq == f0)[0][0]])

    maxima = np.array([np.max(k) for k in r])
    ratio = maxima / mx_centre
    ratio_db = 20 * np.log10(ratio)

    for k in range(r.shape[0]):

        print('{:.0f}Hz; max = {:.4f}; ratio = {:.4f}; {:.4f}dB'.format(
            freq[k], maxima[k], ratio[k], ratio_db[k]))

        if plot:
            line, = plt.plot(t, r[k], c[k])

    mean = (ratio_db[0] + ratio_db[2]) / 2
    diff = abs(-3 - mean)
    print('Mean amplitude: {:.4f}dB'.format(mean))
    print('Difference from -3dB: {:.4f}dB'.format(diff))

    if plot:
        plt.show()
        plt.close()
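
# Note (not part of the original example): the -3dB target checked above
# corresponds to an amplitude ratio of 1/sqrt(2) relative to the centre detector,
# since 20*log10(1/sqrt(2)) is approximately -3.01dB.
print('{:.4f}dB'.format(20 * np.log10(1 / np.sqrt(2))))  # -3.0103dB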
Example #12
def get_responses(file,
                  damping,
                  sp,
                  tp=None,
                  fp=None,
                  fn=None,
                  sl=None,
                  t0=None,
                  t1=None,
                  spa=None):

    sns.set_style('whitegrid')

    head, tail = os.path.split(file)
    base, ext = os.path.splitext(tail)

    # `base` is file name with data separated by dots
    # go backwards through each item and see if it is a note
    baselst = base.split('.')
    i = -1
    while True:
        # if we've gone as far back as we can without finding a note, exit
        if i <= -len(baselst):
            msg = 'Cannot parse note in filename {}'.format(tail)
            raise ValueError(msg)
        # check if the file is a (xylophone) glissando
        if baselst[i].lower() == 'gliss':
            # assuming same xylophone used for all recordings
            if baselst[i + 1].lower() == 'up':
                k = get_note_num('F4')
                break
            elif baselst[i + 1].lower() == 'down':
                k = get_note_num('C8')
                break
#            note_n = list(range(k0, k1+1))
        # try this item
        try:
            # get note number relative to A4 (for frequency calculation)
            k = get_note_num(baselst[i])
            break
        # if this item isn't a note, go backwards and try again
        except ValueError:
            i -= 1

    edo = 12
    method = DetectorBank.runge_kutta
    f_norm = DetectorBank.freq_unnormalized
    a_norm = DetectorBank.amp_unnormalized
    gain = 50

    bw = {1e-4: 0.922, 2e-4: 1.832, 3e-4: 2.752, 4e-4: 3.606, 5e-4: 4.86}

    f0 = 440 * 2**(k / edo)
    f = get_f(f0, b=bw[damping], edo=edo)

    print('Band size: {}'.format(len(f)))

    audio, sr = sf.read(file)

    if spa is not None:
        plot_audio(sr, audio, spa, t0, t1, tp=tp, fp=fp)

    bandwidth = np.zeros(len(f))
    det_char = np.array(list(zip(f, bandwidth)))
    det = DetectorBank(sr, audio.astype(np.float32), 4, det_char,
                       method | f_norm | a_norm, damping, gain)

    z = np.zeros((len(f), len(audio)), dtype=np.complex128)
    r = np.zeros(z.shape)
    det.getZ(z)
    det.absZ(r, z)

    c = [
        'black', 'blue', 'chocolate', 'cyan', 'darkmagenta', 'khaki',
        'deeppink', 'aquamarine', 'darkorange', 'firebrick', 'green',
        'lightslategrey', 'dodgerblue', 'magenta', 'mediumvioletred', 'orange',
        'pink', 'red', 'skyblue', 'lightgrey', 'yellow'
    ]

    centre_idx = c.index('green')
    centre_f = np.where(f == f0)[0][0]
    color_offset = centre_idx - centre_f

    print('Centre freq at index: {}'.format(centre_f))
    print('Colour offset: {}'.format(color_offset))

    t = np.linspace(0, r.shape[1] / sr, r.shape[1])
    if t0 is None:
        t0 = 0
    else:
        t0 = int(sr * t0)
    if t1 is None:
        t1 = len(audio)
    else:
        t1 = int(sr * t1)

    for k in range(r.shape[0]):
        plt.plot(t[t0:t1],
                 r[k][t0:t1],
                 color=c[(k + color_offset) % len(c)],
                 label='{:.3f}Hz'.format(f[k]))

    if tp is not None:
        for onset in tp:
            plt.axvline(onset, color='lime')
    if fp is not None:
        for onset in fp:
            plt.axvline(onset, color='mediumorchid', linestyle='--')
    if fn is not None:
        for onset in fn:
            plt.axvline(onset, color='indigo', linestyle='--')

    plt.xlabel('Time (s)')
    plt.ylabel('|z|', rotation='horizontal', labelpad=10)
    #    ax = plt.gca()
    #    ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0))

    if sl is not None:
        colours = [c[(k + color_offset) % len(c)] for k in range(r.shape[0])]
        labels = ['{:.3f} Hz'.format(f[k]) for k in range(r.shape[0])]
        sl.plot(labels=labels,
                colours=colours,
                ncol=1,
                title='Detector frequencies')


#    else:
#        plt.legend()

    plt.grid(True)

    sp.plot(plt)
Example #13
audio = mix(f, amplitudes)

r = np.zeros((len(d), len(audio)))

for n in range(len(d)):

    z = np.zeros((len(f), len(audio)), dtype=np.complex128)

    det = DetectorBank(sr, audio.astype(np.float32), 4, det_char,
                       method | f_norm | a_norm, d[n], gain)

    det.getZ(z)

    r0 = np.zeros(z.shape)

    m = det.absZ(r0, z)

    r[n] = r0

for k in range(len(r) - 1):

    rxamp = max(r[k]) / np.e
    mxtm = np.where(r[k] == max(r[k]))[0][0]
    rxtm = np.where(r[k][mxtm:] <= rxamp)[0][0]
    print('Damping: {:.0e}; Relaxation Time: {:.4f}'.format(d[k], rxtm / sr))

c = ['blue', 'darkmagenta', 'red', 'green']
style = ['-', '-', '-', '-']

majorLocator = MultipleLocator(1)
minorLocator = MultipleLocator(0.25)
    f_norm = DetectorBank.freq_unnormalized
    a_norm = DetectorBank.amp_unnormalized
    d = 0.0001
    gain = 25
    f = np.array([f0])
    bandwidth = np.zeros(len(f))
    det_char = np.array(list(zip(f, bandwidth)))

    # get response
    det = DetectorBank(sr, audio.astype(np.float32), 4, det_char,
                       method | f_norm | a_norm, d, gain)

    z = np.zeros((len(f), len(audio)), dtype=np.complex128)
    r = np.zeros(z.shape)
    det.getZ(z)
    det.absZ(r, z)

    # put in list
    responses.append(r[0])

    # plot 5Hz response
    if f0 == 5:
        t = np.linspace(0, len(audio) / sr, len(audio))
        plt.plot(t, r[0], color='darkmagenta')

plt.ylabel('|z|', rotation='horizontal')
plt.xlabel('Time (s)')
plt.grid(True)

ax = plt.gca()
ax.yaxis.labelpad = 10
def plot_complex(f0, i, nOsc, end):

    d = 0.0001

    sr = 48000

    dur = 5
    t = np.linspace(0, 2 * np.pi * f0 * dur, sr * dur)
    audio = np.sin(t)
    audio = np.append(audio, np.zeros(sr))
    gain = 5

    method = DetectorBank.runge_kutta  # or DetectorBank.central_difference
    f_norm = DetectorBank.freq_unnormalized
    a_norm = DetectorBank.amp_unnormalized
    f = np.array([f0])
    bandwidth = np.zeros(len(f))
    det_char = np.array(list(zip(f, bandwidth)))

    det = DetectorBank(sr, audio.astype(np.float32), 4, det_char,
                       method | f_norm | a_norm, d, gain)

    z = np.zeros((len(f), len(audio)), dtype=np.complex128)
    det.getZ(z)

    r = np.zeros(z.shape)
    det.absZ(r, z)

    # number of oscillations to plot
    # signal may have been modulated by DetectorBank
    # detFreq is the frequency the detector is actually operating at, therefore
    # the frequency of the orbits
    detFreq = det.getW(0) / (2 * np.pi)
    # number of samples in nOsc
    sOsc = int(nOsc / detFreq * sr)

    if end == 'first':
        t0 = 0
        t1 = sOsc
    elif end == 'last':
        t0 = (dur * sr) - sOsc
        t1 = dur * sr
    else:
        raise ValueError("end should be 'first' or 'last'")

    x = z[0].real[t0:t1]
    y = z[0].imag[t0:t1]

    c = ['darkmagenta']

    a, b = getEllipseParams(x, y)
    e = np.sqrt(a**2 - b**2) / a

    plt.plot(x, y, c[0])
    plt.grid(True)

    plt.xlabel('real')
    plt.ylabel('imag')

    fig = plt.gcf()
    fig.set_size_inches(8, 8)

    plt.show()
    plt.close()

    return e
def plotMean(audio, sr, freq, onset, found, sp):

    sns.set_style('whitegrid')

    if audio.ndim > 1:
        audio = np.mean(audio, axis=1)

    offset = 1 / 4
    pad = np.zeros(int(sr * offset))
    #    pad.fill(audio[0])

    audio = np.append(pad, audio)

    params = getParams()
    method = params['method']
    f_norm = params['f_norm']
    a_norm = params['a_norm']
    d = params['damping']
    gain = params['gain']

    bw = params['real_bandwidth']
    edo = params['edo']

    f = makeBand(freq, bw, edo)

    bandwidth = np.zeros(len(f))
    det_char = np.array(list(zip(f, bandwidth)))

    det = DetectorBank(sr, audio.astype(np.float32), 4, det_char,
                       method | f_norm | a_norm, d, gain)

    z = np.zeros((len(f), len(audio)), dtype=np.complex128)
    det.getZ(z)

    r = np.zeros(z.shape)
    det.absZ(r, z)

    meanlog = np.zeros(len(audio))

    # sum the log of each channel's |z| at every sample
    for n in range(len(meanlog)):
        mean = 0
        for k in range(det.getChans()):
            mean += zeroLog(r[k, n])
        meanlog[n] = mean

    t = np.linspace(0, len(audio) / sr, len(audio))

    t0 = 0.25
    t1 = 0.5

    i0 = int(sr * t0)
    i1 = int(sr * t1)

    plt.figure()
    plt.plot(t[i0:i1], meanlog[i0:i1])

    plt.axvline(onset + offset, color='lime')  # linestyle='--', )
    for t in found:
        plt.axvline(t + offset, linestyle='--', color='red')

    ax = plt.gca()
    xtx = ax.get_xticks()
    xtxlab = ['{:.0f}'.format(1000 * (item - offset)) for item in xtx]
    ax.set_xticklabels(xtxlab)

    #    plt.title('{:.3f}Hz'.format(freq))
    plt.xlabel('Time (ms)')
    #    plt.ylabel('Log(|z|)')
    plt.ylabel('Mean log')
    plt.grid(True)

    sp.plot(plt)
Example #17
damping = 0.0001
# minimum bandwidth detectors
bandwidth = np.zeros(len(f))
#bandwidth[1] = 5
#bandwidth[2] = 7
#bandwidth.fill(5)
det_char = np.array(list(zip(f, bandwidth)))
gain = 5
det = DetectorBank(sr, audio.astype(np.float32), 4, det_char, 
                   method|f_norm|a_norm, damping, gain)

z = np.zeros((len(f),len(audio)), dtype=np.complex128)  
det.getZ(z)

r = np.zeros(z.shape)
m = det.absZ(r, z)

c = ['darkmagenta', 'red', 'blue', 'green']

t = np.linspace(0, r.shape[1]/sr, r.shape[1])
for k in range(r.shape[0]):
    line, = plt.plot(t, r[k], 'darkmagenta') 
    
ax = plt.gca()
plt.xlabel('Time (s)')
plt.ylabel('|z|', rotation='horizontal')
#ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
ax.yaxis.labelpad = 10
ax.grid(True)
plt.show()
plt.close()
def get_rise_relax_times(d,
                         f0,
                         sr,
                         method,
                         f_norm,
                         a_norm,
                         gain,
                         plot_times,
                         sp=None):
    """ Get rise and relax times for a DetectorBank at five damping factors
    
        Parameters
        ----------
        d : list
            List of damping factors to test
        f0 : float
            Centre frequency
        sr : int
            Sample rate
        method : { DetectorBank.runge_kutta, DetectorBank.central_difference }
            DetectorBank method
        f_norm : { DetectorBank.freq_unnormalized, DetectorBank.search_normalized}
            DetectorBank frequency normalisation
        a_norm : { DetectorBank.amp_unnormalized, DetectorBank.amp_normalized}
            DetectorBank amplitude normalisation
        gain : float
            Input gain
        plot_times : bool
            Whether or not to plot the responses
        sp : list of SavePlot objects
            If plot_times=True, also provide SavePlot objects for the rise and
            relax plots
            
        Returns
        -------
        Two lists of tuples: rise times and relax times.
        
        In each case, the tuples are the boundary times (10% time and 90% time
        or max time and 1/e time). The rise and relaxation times are the
        difference between these values.
    """

    # make input
    # 'dur' is tone duration
    # 2 seconds of silence will automatically be appended
    dur = 3
    audio = make_audio([f0], sr, dur)

    # make frequency/bandwidth pairs
    f = np.array([f0])
    b = np.zeros(len(f))
    det_char = np.array(list(zip(f, b)))

    # relaxation times
    rxtms = []
    # rise times
    rstms = []
    # rise time is time from 10% to 90% of final value
    r_min, r_max = 0.1, 0.9

    # if we're plotting, we'll have to store all the responses
    if plot_times:
        all_r = np.zeros((len(d), len(audio)))

        # also check there is a SavePlot object
        if sp is None:
            raise ValueError('Please provide a SavePlot object if you wish to '
                             'plot the responses')

    # get response for each damping factor
    for n in range(len(d)):

        z = np.zeros((len(f), len(audio)), dtype=np.complex128)
        r = np.zeros(z.shape)

        det = DetectorBank(sr, audio.astype(np.float32), 4, det_char,
                           method | f_norm | a_norm, d[n], gain)
        det.getZ(z)
        mx = det.absZ(r, z)

        if plot_times:
            all_r[n] = r[0]

        # r is a 2D array. As there's only one channel, r[0] contains the output

        # rise time
        rise0 = np.where(r[0] >= r_min * mx)[0][0]
        rise1 = np.where(r[0] >= r_max * mx)[0][0]
        rstms.append((rise0, rise1))

        # relaxation time is time for amplitude to fall to 1/e of max
        rxamp = max(r[0]) / np.e
        # max is end of tone
        # NB can't say np.where(r[k]==mx), as this may return values before the
        # end of the tone
        mxtm = sr * dur
        # samples from max time to 1/e
        rxtm = np.where(r[0][mxtm:] <= rxamp)[0][0]
        rxtms.append((mxtm, mxtm + rxtm))

    if plot_times:
        plot(all_r, rstms, sr, len(audio), sp[0], t0=0, t1=1.1)
        plot(all_r, rxtms, sr, len(audio), sp[1], t0=2.95, t1=3.65)

    return rstms, rxtms
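
# Hypothetical usage sketch: requires the make_audio helper assumed by the
# function above. Damping factors are illustrative; plotting is disabled so no
# SavePlot objects are needed.
sr = 48000
damping_factors = [1e-4, 2e-4, 3e-4, 4e-4, 5e-4]
rise, relax = get_rise_relax_times(damping_factors, 440, sr,
                                   DetectorBank.runge_kutta,
                                   DetectorBank.freq_unnormalized,
                                   DetectorBank.amp_unnormalized,
                                   gain=25, plot_times=False)
for d_val, (r0, r1) in zip(damping_factors, rise):
    print('damping {:.0e}: rise time {:.4f}s'.format(d_val, (r1 - r0) / sr))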