Example #1
def impulse(samplingRate=None, fftDegree=None):
    """
    Generates a normalized impulse signal at time zero, padded with zeros
    to fill the total time length.
    """
    # Code snippet to guarantee that the generated object's name is
    # the one declared at the global scope
    # for frame, line in traceback.walk_stack(None):
    for framenline in traceback.walk_stack(None):
        # varnames = frame.f_code.co_varnames
        varnames = framenline[0].f_code.co_varnames
        if varnames == ():
            break
    # creation_file, creation_line, creation_function, \
    #     creation_text = \
    extracted_text = \
        traceback.extract_stack(framenline[0], 1)[0]
    # traceback.extract_stack(frame, 1)[0]
    # creation_name = creation_text.split("=")[0].strip()
    creation_name = extracted_text[3].split("=")[0].strip()

    if samplingRate is None:
        samplingRate = default.samplingRate
    if fftDegree is None:
        fftDegree = default.fftDegree

    numSamples = 2**fftDegree
    impulseSignal = np.zeros(numSamples)
    impulseSignal[0] = 1.0
    impulseSignal = SignalObj(signalArray=impulseSignal,
                              domain='time',
                              samplingRate=samplingRate,
                              signalType='energy')
    impulseSignal.creation_name = creation_name
    return impulseSignal
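A minimal usage sketch for the generator above (an assumption here is that the PyTTa package is installed and that this function is exposed as pytta.generate.impulse, which is how the library's own docstrings reference its generators):

import pytta

# Assign at global scope so that the stack walk above can recover the name.
myImp = pytta.generate.impulse(samplingRate=44100, fftDegree=16)

print(myImp.creation_name)      # expected: 'myImp'
print(myImp.numSamples)         # 2**16 = 65536 samples; the first one is 1.0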
Example #2
def merge(signal1, *signalObjects):
    """Gather all channels of the signalObjs given as input arguments into a single SignalObj."""
    j = 1
    freqMin = cp.deepcopy(signal1.freqMin)
    freqMax = cp.deepcopy(signal1.freqMax)
    comment = cp.deepcopy(signal1.comment)
    channels = cp.deepcopy(signal1.channels)
    timeSignal = cp.deepcopy(signal1.timeSignal)
    for inObj in signalObjects:
        if signal1.samplingRate != inObj.samplingRate:
            message = ('\nTo merge signals they must have the same '
                       'sampling rate!\nSignalObj 1 and ' + str(j + 1) +
                       ' have different sampling rates.')
            raise AttributeError(message)
        if signal1.numSamples != inObj.numSamples:
            message = ('\nTo merge signals they must have the same length!'
                       '\nSignalObj 1 and ' + str(j + 1) +
                       ' have different lengths.')
            raise AttributeError(message)
        comment = comment + ' / ' + inObj.comment
        for ch in inObj.channels._channels:
            channels.append(ch)
        timeSignal = np.hstack((timeSignal, inObj.timeSignal))
        j += 1
    newSignal = SignalObj(timeSignal,
                          domain='time',
                          samplingRate=signal1.samplingRate,
                          freqMin=freqMin,
                          freqMax=freqMax,
                          comment=comment)
    channels.conform_to()
    newSignal.channels = channels
    return newSignal
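A short usage sketch, assuming the sine generator shown later in this collection is available as pytta.generate.sin and that the merge function above is in scope; both inputs share the default sampling rate and length, which is what the checks above require:

import pytta

sig1 = pytta.generate.sin(freq=500)     # same defaults -> same samplingRate
sig2 = pytta.generate.sin(freq=1000)    # and same number of samples
merged = merge(sig1, sig2)              # one SignalObj holding both channels
print(merged.numChannels)               # expected: 2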
Example #3
    def filter(self, sigobj):
        """
        Filter the signal object.

        For each channel inside the input signalObj, a new SignalObj is
        generated containing that channel's filtered signal.

        Args:
            sigobj: SignalObj

        Return:
            output: List
                A list containing one SignalObj with the filtered data for each
                channel in the original signalObj.

        """
        if self.samplingRate != sigobj.samplingRate:
            raise ValueError(
                "SignalObj must have same sampling rate of filter to be filtered."
            )
        n = self.sos.shape[2]
        output = []
        chl = []
        for ch in range(sigobj.numChannels):
            sobj = sigobj[ch]
            filtered = np.zeros((sobj.numSamples, n))
            for k in range(n):
                cContiguousArray = sobj.timeSignal[:].copy(order='C')
                filtered[:, k] = ss.sosfilt(self.sos[:, :, k].copy(order='C'),
                                            cContiguousArray,
                                            axis=0).T
                chl.append(copy(sobj.channels[sobj.channels.mapping[0]]))
                chl[-1].num = k + 1
                chl[-1].name = f'Band {k+1}'
                chl[-1].code = f'B{k+1}'
            signalDict = {
                'signalArray': filtered,
                'domain': 'time',
                'samplingRate': self.samplingRate,
                'freqMin': sigobj.freqMin,
                'freqMax': sigobj.freqMax,
            }
            out = SignalObj(**signalDict)
            out.channels = ChannelsList(chl)
            # out.timeSignal = out.timeSignal * out.channels.CFlist()
            output.append(out)
            chl.clear()
        return output
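The method above is shown out of its class, so a fully self-contained call is not possible here; the sketch below reproduces just the core per-band operation with scipy directly. The two Butterworth band-pass designs are only illustrative stand-ins for whatever self.sos actually holds:

import numpy as np
import scipy.signal as ss

fs = 44100
x = np.random.randn(fs)                    # one second of one-channel data
# Stack two band designs along a third axis, the way self.sos stacks bands:
sos = np.stack([ss.butter(4, [88, 177], 'bandpass', fs=fs, output='sos'),
                ss.butter(4, [177, 355], 'bandpass', fs=fs, output='sos')],
               axis=2)                     # shape (sections, 6, nBands)
filtered = np.zeros((x.size, sos.shape[2]))
for k in range(sos.shape[2]):              # one sosfilt pass per band
    filtered[:, k] = ss.sosfilt(sos[:, :, k], x)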
Example #4
def resample(signal, newSamplingRate):
    """
    Resample the timeSignal of the input SignalObj to the
    given sample rate using the scipy.signal.resample() function
    """
    newSignalSize = int(signal.timeLength * newSamplingRate)
    resampled = ss.resample(signal.timeSignal[:], newSignalSize)
    newSignal = SignalObj(resampled, "time", newSamplingRate)
    return newSignal
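A short usage sketch, assuming the sin generator from this collection is available and that the function above is in scope; the 22050 Hz target is just an example value:

import pytta

sig = pytta.generate.sin(freq=1000)         # 44100 Hz, 1 s by default
down = resample(sig, 22050)
print(down.samplingRate, down.numSamples)   # 22050, half the original samples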
Example #5
def read_wav(fileName):
    """Read a wave file into a SignalObj."""
    samplingRate, data = wf.read(fileName)
    if data.dtype == 'int16':
        data = data / (2**15)
    if data.dtype == 'int32':
        data = data / (2**31)
    signal = SignalObj(data, 'time', samplingRate=samplingRate)
    return signal
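A minimal round-trip sketch; wf above is scipy.io.wavfile, and the divisions by 2**15 and 2**31 rescale 16-bit and 32-bit integer PCM to floats in [-1, 1). The file name is just a placeholder:

import numpy as np
import scipy.io.wavfile as wf

fs = 44100
tone = 0.5 * np.sin(2 * np.pi * 440 * np.arange(fs) / fs) * 2**15
wf.write('tone.wav', fs, tone.astype('int16'))       # write a 16-bit test file

sig = read_wav('tone.wav')                           # function defined above
print(sig.samplingRate, abs(sig.timeSignal).max())   # 44100, about 0.5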
Example #6
def impulse(samplingRate=None, fftDegree=None):
    """
    Generates a normalized impulse signal at time zero, padded with zeros
    to fill the total time length.
    """
    # Code snippet to guarantee that the generated object's name is
    # the one declared at the global scope
    # for frame, line in traceback.walk_stack(None):
    for framenline in traceback.walk_stack(None):
        # varnames = frame.f_code.co_varnames
        varnames = framenline[0].f_code.co_varnames
        if varnames == ():
            break
    # creation_file, creation_line, creation_function, \
    #     creation_text = \
    extracted_text = \
        traceback.extract_stack(framenline[0], 1)[0]
    # traceback.extract_stack(frame, 1)[0]
    # creation_name = creation_text.split("=")[0].strip()
    creation_name = extracted_text[3].split("=")[0].strip()

    if samplingRate is None:
        samplingRate = default.samplingRate
    if fftDegree is None:
        fftDegree = default.fftDegree

    numSamples = 2**fftDegree
    # FIXME: I don't know why you created it this way. I guess it would be
    # better to just create a vector of zeros and then set the first sample
    # to 1.
    # =========================================================================
    #     impulseSignal = (numSamples / samplingRate) \
    #         * np.ones(numSamples) + 1j * np.random.randn(numSamples)
    #     impulseSignal = np.real(np.fft.ifft(impulseSignal))
    #     impulseSignal = impulseSignal / max(impulseSignal)
    # =========================================================================
    impulseSignal = np.zeros(numSamples)
    impulseSignal[0] = 1.0
    impulseSignal = SignalObj(signalArray=impulseSignal,
                              domain='time',
                              samplingRate=samplingRate,
                              signalType='energy')
    impulseSignal.creation_name = creation_name
    return impulseSignal
Example #7
def fft_convolve(signal1, signal2):
    """
    Use scipy.signal.fftconvolve() to convolve two time domain signals.

        >>> convolution = pytta.fft_convolve(signal1,signal2)

    """
    #    Fs = signal1.Fs
    conv = ss.fftconvolve(signal1.timeSignal, signal2.timeSignal)
    signal = SignalObj(conv, 'time', signal1.samplingRate)
    return signal
Example #8
    def filter(self, signalObj):
        if self.samplingRate != signalObj.samplingRate:
            raise ValueError("SignalObj must have the same sampling rate "
                             "as the filter to be filtered.")
        output = []
        for ch in range(signalObj.numChannels):
            filtered = np.zeros((signalObj.numSamples))
            filtered[:] = ss.sosfilt(self.sos[:, :],
                                     signalObj.timeSignal[:, ch],
                                     axis=0).T
            signalDict = {
                'signalArray': filtered,
                'domain': 'time',
                'samplingRate': self.samplingRate,
                'freqMin': self.band[0],
                'freqMax': self.band[1]
            }
            output.append(SignalObj(**signalDict))
        return output
Example #9
def sin(Arms=0.5,
        freq=1000,
        timeLength=1,
        phase=2 * np.pi,
        samplingRate=default.samplingRate,
        fftDegree=None):
    """
    Generates a sine signal with the traditional parameters plus some PyTTa
    options.

    Creation parameters:
    --------------------

        * Arms (float) (optional):
            The signal's RMS amplitude.

            >>> Apeak = Arms*sqrt(2);

        * freq (float) (optional):
            Sine frequency in hertz;

        * timeLength (float) (optional):
            Sine time length in seconds;

        * fftDegree (int) (optional):
            If given, sets the signal length to 2**fftDegree samples,
            overriding timeLength;

        * phase (float) (optional):
            Sine phase in radians;

        * samplingRate (int) (optional):
            Sampling rate in hertz;

    """
    # Code snippet to guarantee that the generated object's name is
    # the one declared at the global scope
    # for frame, line in traceback.walk_stack(None):
    for framenline in traceback.walk_stack(None):
        # varnames = frame.f_code.co_varnames
        varnames = framenline[0].f_code.co_varnames
        if varnames == ():
            break
    # creation_file, creation_line, creation_function, \
    #     creation_text = \
    extracted_text = \
        traceback.extract_stack(framenline[0], 1)[0]
    # traceback.extract_stack(frame, 1)[0]
    # creation_name = creation_text.split("=")[0].strip()
    creation_name = extracted_text[3].split("=")[0].strip()

    if fftDegree is not None:
        timeLength = 2**(fftDegree) / samplingRate

    t = np.linspace(0, timeLength - (1 / samplingRate),
                    int(samplingRate * timeLength))
    sin = Arms * (2**(1 / 2)) * np.sin(2 * np.pi * freq * t + phase)
    sinSigObj = SignalObj(sin,
                          domain='time',
                          samplingRate=samplingRate,
                          freqMin=default.freqMin,
                          freqMax=default.freqMax)
    sinSigObj.creation_name = creation_name
    return sinSigObj
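A short usage sketch (the generators are exposed as pytta.generate.* in the library, as the sweep docstring in this collection also shows); it checks the amplitude relation from the docstring, Apeak = Arms*sqrt(2):

import numpy as np
import pytta

tone = pytta.generate.sin(Arms=0.5, freq=1000, timeLength=2)
print(tone.timeSignal.max())                  # peak close to 0.5*sqrt(2) = 0.707
print(np.sqrt(np.mean(tone.timeSignal**2)))   # RMS close to 0.5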
Example #10
def random_noise(kind='white',
                 samplingRate=None,
                 fftDegree=None,
                 startMargin=None,
                 stopMargin=None,
                 windowing='hann'):
    """
    Generates noise of kind White, Pink (TO DO) or Blue (TO DO), with silence
    at the beginning and end of the signal, plus a fade-in to avoid abrupt
    speaker excursion. All noises have normalized amplitude.

        White noise is generated using numpy.random.randn and then normalized;

        Pink noise is still in progress;

        Blue noise is still in progress;
    """
    # Code snippet to guarantee that the generated object's name is
    # the one declared at the global scope
    # for frame, line in traceback.walk_stack(None):
    for framenline in traceback.walk_stack(None):
        # varnames = frame.f_code.co_varnames
        varnames = framenline[0].f_code.co_varnames
        if varnames == ():
            break
    # creation_file, creation_line, creation_function, \
    #     creation_text = \
    extracted_text = \
        traceback.extract_stack(framenline[0], 1)[0]
    # traceback.extract_stack(frame, 1)[0]
    # creation_name = creation_text.split("=")[0].strip()
    creation_name = extracted_text[3].split("=")[0].strip()

    # It was done like this because a function default argument is a value
    # assigned at import time, and PyTTa has a default object that handles
    # default values for all functions and classes across all submodules.
    # For it to work as expected, the values must be reassigned at every
    # function call to pick up the updated defaults. Otherwise, no matter how
    # the default object's properties are changed, the function calls would
    # never see the new values.
    if samplingRate is None:
        samplingRate = default.samplingRate
    if fftDegree is None:
        fftDegree = default.fftDegree
    if startMargin is None:
        startMargin = default.startMargin
    if stopMargin is None:
        stopMargin = default.stopMargin

    # [samples] ending silence number of samples
    stopSamples = round(stopMargin * samplingRate)

    # [samples] initial silence number of samples
    startSamples = round(startMargin * samplingRate)

    # [samples] total silence number of samples
    marginSamples = startSamples + stopSamples

    # [samples] full signal number of samples
    numSamples = 2**fftDegree

    # [samples] Actual noise number of samples
    noiseSamples = int(numSamples - marginSamples)
    if kind.upper() in ['WHITE', 'FLAT']:
        noiseSignal = np.random.randn(noiseSamples)


#    elif kind.upper() == 'PINK:  # TODO
#        noiseSignal = np.randn(Nnoise)
#        noiseSignal = noiseSignal/max(abs(noiseSignal))
#        noiseSignal = __do_pink_filtering(noiseSignal)
#    elif kind.upper() == 'BLUE:  # TODO
#        noiseSignal = np.randn(Nnoise)
#        noiseSignal = noiseSignal/max(abs(noiseSignal))
#        noiseSignal = __do_blue_filtering(noiseSignal)

    noiseSignal = __do_noise_windowing(noiseSignal, noiseSamples, windowing)
    noiseSignal = noiseSignal / max(abs(noiseSignal))
    noiseSignal = np.concatenate(
        (np.zeros(int(startSamples)), noiseSignal, np.zeros(int(stopSamples))))
    noiseSignal = SignalObj(signalArray=noiseSignal,
                            domain='time',
                            freqMin=default.freqMin,
                            freqMax=default.freqMax,
                            samplingRate=samplingRate)
    noiseSignal.creation_name = creation_name
    return noiseSignal
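A short usage sketch with the sample bookkeeping above written out; it assumes this function is reachable as pytta.generate.random_noise, and the parameter values are chosen only to make the arithmetic concrete:

import pytta

noise = pytta.generate.random_noise(kind='white', samplingRate=44100,
                                    fftDegree=17, startMargin=0.3,
                                    stopMargin=0.7)
# 2**17 = 131072 total samples; round(0.3*44100) = 13230 leading zeros and
# round(0.7*44100) = 30870 trailing zeros leave 86972 samples of noise.
print(noise.numSamples, noise.timeLength)     # 131072, about 2.97 s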
Example #11
def sweep(freqMin=None,
          freqMax=None,
          samplingRate=None,
          fftDegree=None,
          startMargin=None,
          stopMargin=None,
          method='logarithmic',
          windowing='hann'):
    """
    Generates a chirp signal defined by the "method" input, windowed, with
    silence intervals at the beginning and end of the signal, plus a Hanning
    fade in and fade out.

    >>> x = pytta.generate.sweep()
    >>> x.plot_time()

    Returns a signalObj containing a logarithmic chirp signal from 17.8 Hz
    to 22050 Hz, with a fade in starting at the 17.8 Hz time instant and
    ending at the 20 Hz time instant, plus a fade out starting at the
    20000 Hz time instant and ending at the 22050 Hz time instant.

    The fade in and fade out are made with half a Hanning window each: the
    first half for the fade in and the last half for the fade out. A different
    number of points is used for each fade, so the number of time samples
    spent at each frequency is respected.

    Input arguments (default), (type):
    ------------------------

        * freqMin (20), (float)

        * freqMax (20000), (float)

        * samplingRate (44100), (int)

        * fftDegree (18), (float)

        * startMargin (0.3), (float)

        * stopMargin (0.7), (float)

        * method ('logarithmic'), (string)

        * windowing ('hann'), (string)


    """
    # Code snippet to guarantee that the generated object's name is
    # the one declared at the global scope
    # for frame, line in traceback.walk_stack(None):
    for framenline in traceback.walk_stack(None):
        # varnames = frame.f_code.co_varnames
        varnames = framenline[0].f_code.co_varnames
        if varnames == ():
            break
    # creation_file, creation_line, creation_function, \
    #     creation_text = \
    extracted_text = \
        traceback.extract_stack(framenline[0], 1)[0]
    # traceback.extract_stack(frame, 1)[0]
    # creation_name = creation_text.split("=")[0].strip()
    creation_name = extracted_text[3].split("=")[0].strip()

    # It was done like this because a function default argument is a value
    # assigned at import time, and PyTTa has a default object that handles
    # default values for all functions and classes across all submodules.
    # For it to work as expected, the values must be reassigned at every
    # function call to pick up the updated defaults. Otherwise, no matter how
    # the default object's properties are changed, the function calls would
    # never see the new values.
    if freqMin is None:
        freqMin = default.freqMin
    if freqMax is None:
        freqMax = default.freqMax
    if samplingRate is None:
        samplingRate = default.samplingRate
    if fftDegree is None:
        fftDegree = default.fftDegree
    if startMargin is None:
        startMargin = default.startMargin
    if stopMargin is None:
        stopMargin = default.stopMargin

    # frequency limits [Hz]
    freqLimits = {
        'freqMin': freqMin / (2**(1 / 6)),
        'freqMax': min(freqMax * (2**(1 / 6)), samplingRate / 2)
    }
    samplingTime = 1 / samplingRate  # [s] sampling period

    stopSamples = stopMargin * samplingRate
    # [samples] ending silence number of samples

    startSamples = startMargin * samplingRate
    # [samples] initial silence number of samples

    marginSamples = startSamples + stopSamples
    # [samples] total silence number of samples

    numSamples = 2**fftDegree  # [samples] full signal number of samples

    sweepSamples = numSamples - marginSamples + 1
    # [samples] actual sweep number of samples

    if sweepSamples < samplingRate / 10:
        raise Exception('The resulting sweep is too short. For such large ' +
                        'margins you must increase the fftDegree.')

    sweepTime = sweepSamples / samplingRate  # [s] sweep's time length
    timeVecSweep = np.arange(0, sweepTime, samplingTime)  # [s] time vector
    if timeVecSweep.size > sweepSamples:
        timeVecSweep = timeVecSweep[0:int(sweepSamples)]  # adjust length
    sweep = 0.95 * ss.chirp(timeVecSweep,
                            freqLimits['freqMin'],
                            sweepTime,
                            freqLimits['freqMax'],
                            'logarithmic',
                            phi=-90)  # sweep, time domain
    sweep = __do_sweep_windowing(sweep, timeVecSweep, freqLimits, freqMin,
                                 freqMax, windowing)  # fade in and fade out
    # add initial and ending slices
    timeSignal = np.concatenate(
        (np.zeros(int(startSamples)), sweep, np.zeros(int(stopSamples))))
    if timeSignal.size != numSamples:
        timeSignal = timeSignal[0:int(numSamples)]  # adjust length

    # transforms into a pytta signalObj and sets the correct name
    sweepSignal = SignalObj(signalArray=timeSignal,
                            domain='time',
                            samplingRate=samplingRate,
                            **freqLimits)

    sweepSignal.creation_name = creation_name

    return sweepSignal
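A short usage sketch spelling out the freqLimits arithmetic above: the requested band is widened by a sixth of an octave on each side and clipped at the Nyquist frequency, which is where the 17.8 Hz and 22050 Hz figures in the docstring come from:

import pytta

x = pytta.generate.sweep(freqMin=20, freqMax=20000, samplingRate=44100)
# The chirp actually runs from 20 / 2**(1/6), roughly 17.8 Hz,
# up to min(20000 * 2**(1/6), 44100 / 2) = 22050 Hz.
x.plot_time()               # as in the docstring example
print(x.freqMin, x.freqMax)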
Example #12
def noise(kind='white',
          samplingRate=None,
          fftDegree=None,
          startMargin=None,
          stopMargin=None,
          windowing='hann'):
    """
    Generates noise of kind White, Pink (TO DO) or Blue (TO DO), with silence
    at the beginning and end of the signal, plus a fade-in to avoid abrupt
    speaker excursion. All noises have normalized amplitude.

        White noise is generated using numpy.random.randn between [[1];[-1]];
        # FIXME: This looks incorrect because the signal has normal
        # distribution, so no limits but an average and standard deviation.

        Pink noise is still in progress;

        Blue noise is still in progress;
    """
    # Code snippet to guarantee that the generated object's name is
    # the one declared at the global scope
    # for frame, line in traceback.walk_stack(None):
    for framenline in traceback.walk_stack(None):
        # varnames = frame.f_code.co_varnames
        varnames = framenline[0].f_code.co_varnames
        if varnames == ():
            break
    # creation_file, creation_line, creation_function, \
    #     creation_text = \
    extracted_text = \
        traceback.extract_stack(framenline[0], 1)[0]
    # traceback.extract_stack(frame, 1)[0]
    # creation_name = creation_text.split("=")[0].strip()
    creation_name = extracted_text[3].split("=")[0].strip()

    if samplingRate is None:
        samplingRate = default.samplingRate
    if fftDegree is None:
        fftDegree = default.fftDegree
    if startMargin is None:
        startMargin = default.startMargin
    if stopMargin is None:
        stopMargin = default.stopMargin

    # [samples] ending silence number of samples
    stopSamples = round(stopMargin * samplingRate)

    # [samples] initial silence number of samples
    startSamples = round(startMargin * samplingRate)

    # [samples] total silence number of samples
    marginSamples = startSamples + stopSamples

    # [samples] full signal number of samples
    numSamples = 2**fftDegree

    # [samples] Actual noise number of samples
    noiseSamples = int(numSamples - marginSamples)
    if kind.upper() in ['WHITE', 'FLAT']:
        noiseSignal = np.random.randn(noiseSamples)


#    elif kind.upper() == 'PINK:  # TODO
#        noiseSignal = np.randn(Nnoise)
#        noiseSignal = noiseSignal/max(abs(noiseSignal))
#        noiseSignal = __do_pink_filtering(noiseSignal)
#    elif kind.upper() == 'BLUE:  # TODO
#        noiseSignal = np.randn(Nnoise)
#        noiseSignal = noiseSignal/max(abs(noiseSignal))
#        noiseSignal = __do_blue_filtering(noiseSignal)

    noiseSignal = __do_noise_windowing(noiseSignal, noiseSamples, windowing)
    noiseSignal = noiseSignal / max(abs(noiseSignal))
    noiseSignal = np.concatenate(
        (np.zeros(int(startSamples)), noiseSignal, np.zeros(int(stopSamples))))
    noiseSignal = SignalObj(signalArray=noiseSignal,
                            domain='time',
                            freqMin=default.freqMin,
                            freqMax=default.freqMax,
                            samplingRate=samplingRate)
    noiseSignal.creation_name = creation_name
    return noiseSignal
Example #13
def __parse_load(className):
    name = className.split('.')[0]
    jsonFile = open(className, 'r')
    openJson = json.load(jsonFile)
    if name == 'SignalObj':
        openMat = sio.loadmat(openJson['timeSignalAddress'])
        out = SignalObj(openMat['timeSignal'],
                        domain=openJson['lengthDomain'],
                        samplingRate=openJson['samplingRate'],
                        freqMin=openJson['freqLims'][0],
                        freqMax=openJson['freqLims'][1],
                        comment=openJson['comment'])
        out.channels = __parse_channels(openJson['channels'], out.channels)
        os.remove(openJson['timeSignalAddress'])

    elif name == 'ImpulsiveResponse':
        ir = pytta_load(openJson['SignalAddress']['ir'])
        out = ImpulsiveResponse(ir=ir, **openJson['methodInfo'])
        os.remove(openJson['SignalAddress']['ir'])

    elif name == 'RecMeasure':
        inch = list(np.arange(len(openJson['inChannels'])))
        out = RecMeasure(device=openJson['device'],
                         inChannels=inch,
                         lengthDomain='samples',
                         fftDegree=openJson['fftDegree'])
        out.inChannels = __parse_channels(openJson['inChannels'],
                                          out.inChannels)

    elif name == 'PlayRecMeasure':
        inch = list(1 + np.arange(len(openJson['inChannels'])))
        excit = pytta_load(openJson['excitationAddress'])
        out = PlayRecMeasure(excitation=excit,
                             device=openJson['device'],
                             inChannels=inch)
        out.inChannels = __parse_channels(openJson['inChannels'],
                                          out.inChannels)
        os.remove(openJson['excitationAddress'])

    elif name == 'FRFMeasure':
        inch = list(1 + np.arange(len(openJson['inChannels'])))
        excit = pytta_load(openJson['excitationAddress'])
        out = FRFMeasure(excitation=excit,
                         device=openJson['device'],
                         inChannels=inch)
        out.inChannels = __parse_channels(openJson['inChannels'],
                                          out.inChannels)
        os.remove(openJson['excitationAddress'])

    elif name == 'Meta':
        out = []
        for val in openJson.values():
            out.append(pytta_load(val))
            os.remove(val)
    os.remove(className)
    jsonFile.close()
    return out
Example #14
def __h5_unpack(objH5Group):
    """
    Unpack an HDF5 group into its respective PyTTa object
    """
    if objH5Group.attrs['class'] == 'SignalObj':
        # PyTTaObj attrs unpacking
        samplingRate = objH5Group.attrs['samplingRate']
        freqMin = _h5.none_parser(objH5Group.attrs['freqMin'])
        freqMax = _h5.none_parser(objH5Group.attrs['freqMax'])
        lengthDomain = objH5Group.attrs['lengthDomain']
        comment = objH5Group.attrs['comment']
        # SignalObj attr unpacking
        channels = eval(objH5Group.attrs['channels'])
        # Added with an if for compatibility issues
        if 'signalType' in objH5Group.attrs:
            signalType = _h5.attr_parser(objH5Group.attrs['signalType'])
        else:
            signalType = 'power'
        # Creating and conforming SignalObj
        SigObj = SignalObj(signalArray=np.array(objH5Group['timeSignal']),
                           domain='time',
                           signalType=signalType,
                           samplingRate=samplingRate,
                           freqMin=freqMin,
                           freqMax=freqMax,
                           comment=comment)
        SigObj.channels = channels
        SigObj.lengthDomain = lengthDomain
        return SigObj

    elif objH5Group.attrs['class'] == 'ImpulsiveResponse':
        systemSignal = __h5_unpack(objH5Group['systemSignal'])
        method = objH5Group.attrs['method']
        winType = objH5Group.attrs['winType']
        winSize = objH5Group.attrs['winSize']
        overlap = objH5Group.attrs['overlap']
        IR = ImpulsiveResponse(method=method,
                               winType=winType,
                               winSize=winSize,
                               overlap=overlap,
                               ir=systemSignal)
        return IR

    elif objH5Group.attrs['class'] == 'RecMeasure':
        # PyTTaObj attrs unpacking
        samplingRate = objH5Group.attrs['samplingRate']
        freqMin = _h5.none_parser(objH5Group.attrs['freqMin'])
        freqMax = _h5.none_parser(objH5Group.attrs['freqMax'])
        comment = objH5Group.attrs['comment']
        lengthDomain = objH5Group.attrs['lengthDomain']
        fftDegree = objH5Group.attrs['fftDegree']
        timeLength = objH5Group.attrs['timeLength']
        # Measurement attrs unpacking
        device = _h5.list_w_int_parser(objH5Group.attrs['device'])
        inChannels = eval(objH5Group.attrs['inChannels'])
        blocking = objH5Group.attrs['blocking']
        # Recreating the object
        rObj = measurement(kind='rec',
                           device=device,
                           inChannels=inChannels,
                           blocking=blocking,
                           samplingRate=samplingRate,
                           freqMin=freqMin,
                           freqMax=freqMax,
                           comment=comment,
                           lengthDomain=lengthDomain,
                           fftDegree=fftDegree,
                           timeLength=timeLength)
        return rObj

    elif objH5Group.attrs['class'] == 'PlayRecMeasure':
        # PyTTaObj attrs unpacking
        samplingRate = objH5Group.attrs['samplingRate']
        freqMin = _h5.none_parser(objH5Group.attrs['freqMin'])
        freqMax = _h5.none_parser(objH5Group.attrs['freqMax'])
        comment = objH5Group.attrs['comment']
        lengthDomain = objH5Group.attrs['lengthDomain']
        fftDegree = objH5Group.attrs['fftDegree']
        timeLength = objH5Group.attrs['timeLength']
        # Measurement attrs unpacking
        device = _h5.list_w_int_parser(objH5Group.attrs['device'])
        inChannels = eval(objH5Group.attrs['inChannels'])
        outChannels = eval(objH5Group.attrs['outChannels'])
        blocking = objH5Group.attrs['blocking']
        # PlayRecMeasure attrs unpacking
        excitation = __h5_unpack(objH5Group['excitation'])
        outputAmplification = objH5Group.attrs['outputAmplification']
        # Recreating the object
        prObj = measurement(kind='playrec',
                            excitation=excitation,
                            outputAmplification=outputAmplification,
                            device=device,
                            inChannels=inChannels,
                            outChannels=outChannels,
                            blocking=blocking,
                            samplingRate=samplingRate,
                            freqMin=freqMin,
                            freqMax=freqMax,
                            comment=comment)
        return prObj

    elif objH5Group.attrs['class'] == 'FRFMeasure':
        # PyTTaObj attrs unpacking
        samplingRate = objH5Group.attrs['samplingRate']
        freqMin = _h5.none_parser(objH5Group.attrs['freqMin'])
        freqMax = _h5.none_parser(objH5Group.attrs['freqMax'])
        comment = objH5Group.attrs['comment']
        lengthDomain = objH5Group.attrs['lengthDomain']
        fftDegree = objH5Group.attrs['fftDegree']
        timeLength = objH5Group.attrs['timeLength']
        # Measurement attrs unpacking
        device = _h5.list_w_int_parser(objH5Group.attrs['device'])
        inChannels = eval(objH5Group.attrs['inChannels'])
        outChannels = eval(objH5Group.attrs['outChannels'])
        blocking = objH5Group.attrs['blocking']
        # PlayRecMeasure attrs unpacking
        excitation = __h5_unpack(objH5Group['excitation'])
        outputAmplification = objH5Group.attrs['outputAmplification']
        # FRFMeasure attrs unpacking
        method = _h5.none_parser(objH5Group.attrs['method'])
        winType = _h5.none_parser(objH5Group.attrs['winType'])
        winSize = _h5.none_parser(objH5Group.attrs['winSize'])
        overlap = _h5.none_parser(objH5Group.attrs['overlap'])
        # Recreating the object
        frfObj = measurement(kind='frf',
                             method=method,
                             winType=winType,
                             winSize=winSize,
                             overlap=overlap,
                             excitation=excitation,
                             outputAmplification=outputAmplification,
                             device=device,
                             inChannels=inChannels,
                             outChannels=outChannels,
                             blocking=blocking,
                             samplingRate=samplingRate,
                             freqMin=freqMin,
                             freqMax=freqMax,
                             comment=comment)
        return frfObj

    elif objH5Group.attrs['class'] == 'Analysis':
        # Analysis attrs unpacking
        anType = _h5.attr_parser(objH5Group.attrs['anType'])
        nthOct = _h5.attr_parser(objH5Group.attrs['nthOct'])
        minBand = _h5.attr_parser(objH5Group.attrs['minBand'])
        maxBand = _h5.attr_parser(objH5Group.attrs['maxBand'])
        comment = _h5.attr_parser(objH5Group.attrs['comment'])
        title = _h5.attr_parser(objH5Group.attrs['title'])
        dataLabel = _h5.attr_parser(objH5Group.attrs['dataLabel'])
        errorLabel = _h5.attr_parser(objH5Group.attrs['errorLabel'])
        xLabel = _h5.attr_parser(objH5Group.attrs['xLabel'])
        yLabel = _h5.attr_parser(objH5Group.attrs['yLabel'])
        # Analysis data unpacking
        data = np.array(objH5Group['data'])
        # If error was None at save time, no group was created for it
        if 'error' in objH5Group:
            error = np.array(objH5Group['error'])
        else:
            error = None
        # Recreating the object
        anObject = Analysis(anType=anType,
                            nthOct=nthOct,
                            minBand=minBand,
                            maxBand=maxBand,
                            data=data,
                            dataLabel=dataLabel,
                            error=error,
                            errorLabel=errorLabel,
                            comment=comment,
                            xLabel=xLabel,
                            yLabel=yLabel,
                            title=title)
        return anObject

    elif objH5Group.attrs['class'] == 'dict':
        dictObj = {}
        for PyTTaObjName, PyTTaObjH5Group in objH5Group.items():
            dictObj[PyTTaObjName] = __h5_unpack(PyTTaObjH5Group)
        return dictObj

    elif objH5Group.attrs['class'] == 'list':
        dictObj = {}
        for idx, PyTTaObjH5Group in objH5Group.items():
            dictObj[int(idx)] = __h5_unpack(PyTTaObjH5Group)
        idxs = [int(item) for item in list(dictObj.keys())]
        maxIdx = max(idxs)
        listObj = []
        for idx in range(maxIdx + 1):
            listObj.append(dictObj[idx])
        return listObj

    else:
        raise NotImplementedError
Example #15
def colored_noise(color: str or int = 'white',
                  samplingRate: int = None,
                  fftDegree: int = None,
                  numChannels: int = None,
                  startMargin: float = None,
                  stopMargin: float = None,
                  windowing: str = 'hann'):
    """
    Power law noise generator.

    Based on the algorithm in:
    Timmer, J. and Koenig, M.:
    On generating power law noise.
    Astron. Astrophys. 300, 707-710 (1995)

    Generate random noise with respect to the `(1/f)**B` rate. `f` stands for
    frequency and `B` is an integer power.

    The colors and their spectral characteristics:

        * Purple | Differentiated:
            * +6.02 dB/octave | +20 dB/decade | B = -2;
            * color: 'purple', 'diff', 'differentiated';

        * Blue | Azure:
            * +3.01 dB/octave | +10 dB/decade | B = -1;
            * color: 'blue', 'azure'

        * White | Flat:
            * +0.00 dB/octave | +0 dB/decade  | B = 0;
            * color: 'white', 'flat';

        * Pink | Flicker:
            * -3.01 dB/octave | -10 dB/decade | B = 1;
            * color: 'pink', 'flicker', '1/f';

        * Red | Brownian:
            * -6.02 dB/octave | -20 dB/decade | B = 2;
            * color: 'red', 'brown', 'brownian';

    The output signal will have `startMargin` silence at the beginning of the
    waveform, and `stopMargin` silence at the end.

    There is a fade-in between the starting silence and the noise itself that
    occurs during 5% of the total noise duration.

    @author: Chum4k3r
    """
    # Code snippet to guarantee that the generated object's name is
    # the one declared at the global scope
    # for frame, line in traceback.walk_stack(None):
    for framenline in traceback.walk_stack(None):
        # varnames = frame.f_code.co_varnames
        varnames = framenline[0].f_code.co_varnames
        if varnames == ():
            break
    # creation_file, creation_line, creation_function, \
    #     creation_text = \
    extracted_text = \
        traceback.extract_stack(framenline[0], 1)[0]
    # traceback.extract_stack(frame, 1)[0]
    # creation_name = creation_text.split("=")[0].strip()
    creation_name = extracted_text[3].split("=")[0].strip()

    # It was done like this because a function default argument is a value
    # assigned at import time, and PyTTa has a default object that handles
    # default values for all functions and classes across all submodules.
    # For it to work as expected, the values must be reassigned at every
    # function call to pick up the updated defaults. Otherwise, no matter how
    # the default object's properties are changed, the function calls would
    # never see the new values.
    if samplingRate is None:
        samplingRate = default.samplingRate
    if fftDegree is None:
        fftDegree = default.fftDegree
    if numChannels is None:
        numChannels = len(default.outChannel)
    if startMargin is None:
        startMargin = default.startMargin
    if stopMargin is None:
        stopMargin = default.stopMargin

    # [samples] ending silence number of samples
    stopSamples = round(stopMargin * samplingRate)

    # [samples] initial silence number of samples
    startSamples = round(startMargin * samplingRate)

    # [samples] total silence number of samples
    marginSamples = startSamples + stopSamples

    # [samples] full signal number of samples
    numSamples = 2**fftDegree

    # [samples] Actual noise number of samples
    noiseSamples = int(numSamples - marginSamples)

    if type(color) == int:
        noiseSignal = _powerlaw_noise(noiseSamples, numChannels, color,
                                      samplingRate)
    elif type(color) == str:
        if color.upper() in ['PURPLE', 'DIFF', 'DIFFERENTIATED']:
            noiseSignal = _powerlaw_noise(noiseSamples, numChannels, -2,
                                          samplingRate)
        elif color.upper() in ['BLUE', 'AZURE']:
            noiseSignal = _powerlaw_noise(noiseSamples, numChannels, -1,
                                          samplingRate)
        elif color.upper() in ['WHITE', 'FLAT']:
            noiseSignal = _powerlaw_noise(noiseSamples, numChannels, 0,
                                          samplingRate)
        elif color.upper() in ['PINK', 'FLICKER', '1/F']:
            noiseSignal = _powerlaw_noise(noiseSamples, numChannels, 1,
                                          samplingRate)
        elif color.upper() in ['RED', 'BROWN', 'BROWNIAN']:
            noiseSignal = _powerlaw_noise(noiseSamples, numChannels, 2,
                                          samplingRate)
        else:
            raise ValueError("Unknow noise color.")
    else:
        raise TypeError("`color` must be int or str.")
    noiseSignal = __do_noise_windowing(noiseSignal, noiseSamples, windowing)
    # noiseSignal = noiseSignal / max(abs(noiseSignal))
    noiseSignal = np.concatenate((np.zeros(
        (int(startSamples), numChannels)), noiseSignal,
                                  np.zeros((int(stopSamples), numChannels))))
    noiseSignal = SignalObj(signalArray=noiseSignal,
                            domain='time',
                            freqMin=default.freqMin,
                            freqMax=default.freqMax,
                            samplingRate=samplingRate)
    noiseSignal.creation_name = creation_name
    return noiseSignal
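A short usage sketch (assuming this function is reachable as pytta.generate.colored_noise); the color names map onto the exponent B listed in the docstring, so the two calls below request the same spectral color:

import pytta

pink1 = pytta.generate.colored_noise(color='pink', fftDegree=16, numChannels=2)
pink2 = pytta.generate.colored_noise(color=1, fftDegree=16, numChannels=2)  # B = 1
print(pink1.numChannels, pink1.numSamples)    # 2 channels, 2**16 samples each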