Example #1
    def expand(self, audio):
        ori_len = audio.shape[0]
        tmp = resample(audio, r=0.5, type='sinc_best')
        down_len = tmp.shape[0]
        tmp = resample(tmp,
                       r=(ori_len + 1) / float(down_len),
                       type='sinc_best')
        tmp = librosa.stft(audio, 1024)
        phase = np.divide(tmp, np.abs(tmp))
        spec_input = np.abs(librosa.stft(audio, 1024))[0:n_input, ::]
        spec_input = spec_input[::, 0:spec_input.shape[1] // n_len * n_len]
        spec_input = np.split(spec_input, spec_input.shape[1] // n_len, axis=1)
        spec_input = np.asarray(spec_input)
        spec_input = np.expand_dims(spec_input, axis=-1)
        feed_dict = {self.input_op: np.log1p(spec_input) / 12.0}
        debug = self.sess.run(self.debug_op, feed_dict=feed_dict)
        np.save('debug.npy', debug)
        S = self.sess.run(self.eva_op, feed_dict=feed_dict)
        S[S >= 5e3] = 5e3
        S[S <= 0] = 0
        print('mean', np.mean(S))
        print(np.sum(np.isinf(S)))
        S = np.squeeze(np.concatenate(np.split(S, S.shape[0]), axis=2),
                       axis=(0, -1))
        phase = phase[..., :S.shape[1]]
        print(phase.shape)
        print(S.shape)
        print(np.sum(np.isinf(np.multiply(S, phase))))

        X = librosa.istft(np.multiply(S, phase))
        return X
Example #2
def __stream_audio_realtime(filepath, rate=44100):
    total_chunks = 0
    format = pyaudio.paInt16
    channels = 1 if sys.platform == 'darwin' else 2
    record_cap = 10 # seconds
    p = pyaudio.PyAudio()
    stream = p.open(format=format, channels=channels, rate=rate, input=True, frames_per_buffer=ASR.chunk_size)
    print "o\t recording\t\t(Ctrl+C to stop)"
    try:
        desired_rate = float(desired_sample_rate) / rate # desired_sample_rate is an INT. convert to FLOAT for division.
        for i in range(0, rate/ASR.chunk_size*record_cap):
            data = stream.read(ASR.chunk_size)
            _raw_data = numpy.fromstring(data, dtype=numpy.int16)
            _resampled_data = resample(_raw_data, desired_rate, "sinc_best").astype(numpy.int16).tostring()
            total_chunks += len(_resampled_data)
            stdout.write("\r  bytes sent: \t%d" % total_chunks)
            stdout.flush()
            yield _resampled_data
        stdout.write("\n\n")
    except KeyboardInterrupt:
        pass
    finally:
        print "x\t done recording"
        stream.stop_stream()
        stream.close()
        p.terminate()   
Example #3
def resample(y, orig_sr, target_sr, res_type='sinc_fastest'):
    """Resample a signal from orig_sr to target_sr

    Arguments:
      y           -- (ndarray)    audio time series 
      orig_sr     -- (int)        original sample rate of y
      target_sr   -- (int)        target sample rate
      res_type    -- (str)        resample type (see below)
    
    Returns y_hat:
      y_hat       -- (ndarray)    y resampled from orig_sr to target_sr

    Notes:
        if scikits.samplerate is installed, resample will use res_type
        otherwise, it will fall back on scipy.signal.resample
    """

    if orig_sr == target_sr:
        return y

    if _HAS_SAMPLERATE:
        y_hat = samplerate.resample(y, 
                                            float(target_sr) / orig_sr, 
                                            res_type)
    else:
        n_samples = len(y) * target_sr / orig_sr
        y_hat = scipy.signal.resample(y, n_samples, axis=-1)

    return y_hat
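A minimal usage sketch for the wrapper above (assuming it is importable as `resample` together with its module-level dependencies; the tone is a made-up illustration):

import numpy as np

# Downsample one second of a 440 Hz tone from 44.1 kHz to 16 kHz.
sr_in, sr_out = 44100, 16000
t = np.arange(sr_in) / float(sr_in)
tone = np.sin(2 * np.pi * 440.0 * t)

tone_16k = resample(tone, sr_in, sr_out, res_type='sinc_best')
print(len(tone), '->', len(tone_16k))  # roughly len(tone) * sr_out / sr_in samples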
Example #4
    def prepareData(self):
        if self.newSampleRate is not None:
            resamplingFactor = float(self.newSampleRate) / float(
                self.sampleRate)
        elif self.filterb is None:
            print("skipping data preparation, nothing to do")
            return

        newChannels = [None for x in xrange(len(self.channels))]
        for i, channel in itertools.izip(xrange(len(self.channels)),
                                         self.channels):
            print("channel {0}".format(i))
            newData = channel
            if self.filterb is not None:
                print("  lowpass")
                newData = signal.lfilter(self.filterb, self.filtera, channel)
            if self.newSampleRate is not None:
                print("  resampling")
                newData = samplerate.resample(
                    numpy.array(channel), resamplingFactor,
                    'sinc_best' if self.filterb is None else 'sinc_medium')
            newChannels[i] = newData
        self.channels = newChannels
        if self.newSampleRate is not None:
            self.sampleRate = self.newSampleRate
Example #5
File: Audio.py  Project: zelurker/fofix
  def play(self):
    if not self.sound:
      self.sound   = pygame.mixer.Sound(self.file)
      self.freq = self.info.rate
    if resampleOk:
        (freq,format,channels) = pygame.mixer.get_init()
        if freq != self.freq:
          print "reload sound ",self.file
          self.freq = self.info.rate
          self.sound   = pygame.mixer.Sound(self.file)
        if self.freq != freq:
            snd_array = pygame.sndarray.array(self.sound)
            samples = len(snd_array)/2
            samples = int(samples*freq*1.0/(self.info.rate))
            print "start resampling ",self.file," from ",self.info.rate," to ",freq," len ",len(snd_array)/2," visée ",samples
    #        if samples != len(snd_array):
    #          snd_array = np.resize(snd_array,(samples,2))
            snd_array = resample(snd_array, freq*1.0/self.info.rate, "sinc_fastest").astype(snd_array.dtype)
            # datal = signal.resample(snd_array[0::2],samples).astype(snd_array.dtype)
            # datar = signal.resample(snd_array[1::2],samples).astype(snd_array.dtype)
            # snd_array = np.resize(snd_array,(len(datal)*2,2))
            # snd_array[0::2] = datal
            # snd_array[1::2] = datar
            # print "end resampling ",snd_array
            self.sound = pygame.sndarray.make_sound(snd_array)
            self.freq = freq

    if self.volume:
      self.sound.set_volume(self.volume)
    self.channel = self.sound.play()
    self.playTime = time.time()
    if self.event:
        self.channel.set_endevent(self.event)
Example #6
def resample(source, source_sf, target_sf):
    if source_sf != target_sf:
        from scikits import samplerate
        ratio = float(target_sf) / source_sf
        return samplerate.resample(source, ratio, RESAMPLE_TYPE)
    else:
        return source
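A hedged usage sketch for the wrapper above (assumes `RESAMPLE_TYPE` is defined at module level, e.g. 'sinc_best'):

import numpy as np

signal_44k = np.random.randn(44100)              # one second of noise at 44.1 kHz
signal_16k = resample(signal_44k, 44100, 16000)  # ratio 16000/44100 is passed to scikits.samplerate
print(signal_16k.shape)                          # roughly (16000,)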
Example #7
def __stream_audio_realtime(filepath, rate=44100):
    total_chunks = 0
    format = pyaudio.paInt16
    channels = 1 if sys.platform == 'darwin' else 2
    record_cap = 10  # seconds
    p = pyaudio.PyAudio()
    stream = p.open(format=format,
                    channels=channels,
                    rate=rate,
                    input=True,
                    frames_per_buffer=ASR.chunk_size)
    print "o\t recording\t\t(Ctrl+C to stop)"
    try:
        desired_rate = float(
            desired_sample_rate
        ) / rate  # desired_sample_rate is an INT. convert to FLOAT for division.
        for i in range(0, rate / ASR.chunk_size * record_cap):
            data = stream.read(ASR.chunk_size)
            _raw_data = numpy.fromstring(data, dtype=numpy.int16)
            _resampled_data = resample(_raw_data, desired_rate,
                                       "sinc_best").astype(
                                           numpy.int16).tostring()
            total_chunks += len(_resampled_data)
            stdout.write("\r  bytes sent: \t%d" % total_chunks)
            stdout.flush()
            yield _resampled_data
        stdout.write("\n\n")
    except KeyboardInterrupt:
        pass
    finally:
        print "x\t done recording"
        stream.stop_stream()
        stream.close()
        p.terminate()
Example #8
File: data.py  Project: theunissenlab/tlab
def strfpak_resample(sound,slice_rate,resample_type):
    
    '''
    Resample a sound object using scikits.samplerate.resample.
    Sound is upsampled to nearest multiple of slice_rate;
    if sound.samplerate == slice_rate, no resampling is done.
    Sound is also converted to float.
    Currently, error is less than 10dB max, mostly very good.
    '''
    
    from scikits.samplerate import resample
    from copy import deepcopy
    
    # Calculate the resampling frequency for STRFPAK 5.3:
    # round up to the nearest multiple of 'slice_rate'
    input_freq = floor(sound.samplerate)
    output_freq = ceil(input_freq/slice_rate)*slice_rate
    
    # Copy so that we don't alter original object
    sound2 = deepcopy(sound)
    
    # Scale factor -- MATLAB makes .wav data be -1:1, scipy does integer
    sound2.data = sound2.data/float(2**15)
    
    # Resample if necessary
    if output_freq > input_freq:
        sound2.data = resample(sound2.data,output_freq/input_freq,
                               resample_type)
        sound2.samplerate = output_freq
    
        # Pad because matlab resampling works differently
        sound2.data = sound2._zeropad(0,2)
    
    return sound2
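The rounding rule described in the docstring above ("upsampled to nearest multiple of slice_rate"), shown as a standalone sketch with made-up rates:

from math import ceil, floor

slice_rate = 1000.0
for input_freq in (32000.0, 22050.0, 16000.0):
    output_freq = ceil(floor(input_freq) / slice_rate) * slice_rate
    print(input_freq, '->', output_freq)  # 32000 -> 32000, 22050 -> 23000, 16000 -> 16000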
Example #9
    def record(self, signals, Fs):
        '''
        This simulates the recording of the signals by the microphones.
        In particular, if the microphones and the room simulation
        do not use the same sampling frequency, down/up-sampling
        is done here.

        Arguments:
        ----------

        signals: An ndarray with as many lines as there are microphones.
        Fs: the sampling frequency of the signals.
        '''

        if signals.shape[0] != self.M:
            raise NameError('The signals array should have as many lines as there are microphones.')

        if signals.ndim != 2:
            raise NameError('The signals should be a 2D array.')

        if Fs != self.Fs:
            from scikits.samplerate import resample
            Fs_ratio = self.Fs/float(Fs)
            newL = int(Fs_ratio*signals.shape[1])-1
            self.signals = np.zeros((self.M, newL))
            # scikits.samplerate resample function considers columns as channels (hence the transpose)
            for m in range(self.M):
                self.signals[m] = resample(signals[m], Fs_ratio, 'sinc_best')
        else:
            self.signals = signals
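The channel-layout convention mentioned in the comment above, as a standalone hedged sketch (requires scikits.samplerate; the microphone array is made up):

import numpy as np
from scikits.samplerate import resample

fs_in, fs_out = 48000, 16000
two_mics = np.random.randn(2, fs_in)   # (n_mics, n_samples), as stored by the method above
# scikits.samplerate expects samples along axis 0 and channels along axis 1,
# so multi-channel data goes in transposed and comes back transposed.
resampled = resample(two_mics.T, fs_out / float(fs_in), 'sinc_best').T
print(resampled.shape)                 # (2, ~16000)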
Example #10
def convert_wav(File, ofile):
    import scikits.audiolab as audiolab
    from scikits.samplerate import resample
    # the latest scikits.audiolab includes a sound-recording lib, based on python-alsaaudio
    # if you want to do the downsampling with scipy.signal instead:
    #import scipy.signal

    #using audiolab to read wav file
    Signal, fs = audiolab.wavread(File)[:2]
    # change the original sample rate to the target rate fr ('sinc_best' mode)
    Signal = resample(Signal, fr/float(fs), 'sinc_best')
     
    # changing the sample rate with scipy.signal instead is a bit slower:
    #Signal=scipy.signal.resample(Signal,int(round(len(Getsignal)*fr)/float(fs)),window=None)
     
    # file Format type
    fmt = audiolab.Format('flac', 'pcm16')
    nchannels   = 1
     
    # convert into the file .flac
    ofile =  audiolab.Sndfile(FileNameTmp, 'w', fmt, nchannels, fr)
     
    #writing in the file
    ofile.write_frames(Signal)
    #
    return ofile
Example #11
def wav_convert(data, SR, tar_freq):

    # If the input signal is stereo, make it mono.
    if ndim(data) == 2:

        # Mix stereo signal into a mono signal
        buff01 = 0.49 * (data[:, 0] + data[:, 1])
        wave_ts = array(buff01)

    else:
        wave_ts = array(data[:])

    wave_ts = array(wave_ts)

    # Set a sampling rate
    up_SR = 44100

    # Compute a ratio to feed into resample function
    ratio = float(float(tar_freq)/float(up_SR))

    # Resample the file.
    wave_ts = resample(wave_ts, ratio , 'linear')

    # Transpose the data list.
    wave_ts = transpose(wave_ts)

    # Return wave_ts signal
    return wave_ts
Example #12
def wav_convert(data, SR, tar_freq):

    # If the input signal is stereo, make it mono.
    if ndim(data) == 2:
        buff01 = 0.49 * (data[:, 0] + data[:, 1])
        wave_ts = array(buff01)
    else:
        wave_ts = array(data[:])

    wave_ts = array(wave_ts)
    # print "Shape of wave_ts", shape(wave_ts)

    up_SR = 44100
    ratio = float(float(tar_freq)/float(up_SR))
    print tar_freq
    print float(float(tar_freq)/float(up_SR))
    wave_ts = resample(wave_ts, ratio , 'linear')

    # SR_div = int(floor(up_SR/tar_freq))
    # wave_ts = sig.decimate(wave_ts, SR_div)

    # Transpose the data list.
    wave_ts = transpose(wave_ts)
    print 'length wave_ts', shape(wave_ts)

    return wave_ts
Example #13
def analyze(sig, samplerate=8000, resample_to=8000):
    e_min_scale = 0.3
    e_min_distance = 1
    e_a_scale = 0.5
    e_W_ms = 2000
    min_len = 30
    if samplerate != resample_to:
        print("resampling")
        sig = srate.resample(sig, resample_to/samplerate, 'sinc_best')

    frames,frame_size = ad.frames(sig, samplerate, 64)
    #ac_peaks = [ad.ac_peaks(frame) for frame in frames]
    #energy = ad.energies(frames, ...)
    #energy = log_energy
    print("getting normalized spectra")
    normalized_spectrum = ad.normalized_spectrum(frames, samplerate)
    print("spectral entropy")
    spectral_entropy = np.fromiter((ad.entropy(frame) for frame in normalized_spectrum),
                                   dtype=np.float64)
    se_t = np.percentile(spectral_entropy, 80)
    se_segments = ad.segments_to_seconds(ad.entropy_segment_indexes(spectral_entropy, se_t))
    print("energy based computations")
    energy, smooth, en_a, en_t, en_lmin = ad.energy_thresholds(sig, noise_dist=e_min_distance, a_scale=e_a_scale, min_scale=e_min_scale, W_ms=e_W_ms)
    energy_segments = ad.get_voice_segments(smooth, en_t, ad.get_segment_indexes(smooth, en_t, min_len=10))
    #energy_t = ad.thresholds(frames)
    #entropy_t = ad.entropy_t(frames)
    #energy_indexes = ad.get_segment_indexes(x, t, min_len=30)
    #entropy_indexes = ad.get_entropy_indexes(x, t, min_len=30)
    #combination_indexes = ad.get_combined_indexes(x, t, min_len=30)
    return spectral_entropy, se_t, se_segments, energy, en_t, energy_segments
Example #14
File: dec_mic.py  Project: merzod/mirror
def listen(stream, queue):
    try:
        while not exit:
            stream.start_stream()
            print('Listening...')
            for i in range(0,size):
                data = stream.read(frame)
                ar = numpy.fromstring(data, dtype=numpy.int16)
                data2 = resample(ar, (16000./48000.), 'linear')
                q.put(data2)
#                samps = numpy.fromstring(data, dtype=numpy.int16)
#                print (samps, q.qsize())
                rms = audioop.rms(data, 2)
                print rms
            stream.stop_stream()
            if exit:
                sys.exit()
            q.join()
    except IOError:
        print('ERROR!!!!')
        pass
    stream.stop_stream()
    stream.close()
    pyaud.terminate()
    print "----------------------------------------------------------------------------------------------------------------"
Example #15
	def calculate_attributes(self):
		source = self.source
		freq = self.frequency
		sampling_rate = float(source.sampling_rate)
		fft_sampling_rate = sampling_rate/float(source.fft_step_size)
		window_length = float(source.fft_window_size)/sampling_rate
		# FIXME real time should be passed in as an extra field
		self.start = window_length
		if self.first_frame > 0:
			self.start += (self.first_frame - 1)/fft_sampling_rate
		self.length = window_length
		if len(freq) > 1:
#			if 'length' not in self.__dict__:
#				print self.__dict__
#			print self.__dict__['length']
			self.length += (len(freq) - 1)/fft_sampling_rate
#		print self.length
		self.end = self.start + self.length
		for k in ('frequency','amplitude'):
			a = getattr(self,k)
			setattr(self, k+'_min', min(a))
			setattr(self, k+'_max', max(a))
			setattr(self, k+'_mean', sum(a)/len(a))
		freq_window = 2 # seconds
		freq_fft_size = 128
		resampled_freq = resample(freq, freq_window*freq_fft_size/fft_sampling_rate, 'sinc_fastest') # FIXME truncate array?
		self.freq_fft = abs(rfft(resampled_freq,n=freq_fft_size,overwrite_x=True))[1:]
Example #16
    def generate_scale(self):
        """
        Given the initial note, middle C, create the rest of the musical scale by
        resampling.
    
        Returns: Dictionary of musical scale with the key being the name of the note
        and the value being the corresponding sound object.
    
        """    
        pygame.mixer.init()
    
        wav = util.get_app_path() + "res/piano-c.wav"
        sound = pygame.mixer.Sound(wav)
        
        pygame.mixer.set_num_channels(32)
        sndarray = pygame.sndarray.array(sound)

        ratio_dict = {'low_c': 1, 'c_sharp': .944, 'd': .891, 'd_sharp':.841, 'e':.794,
                  'f':.749, 'f_sharp': .707, 'g': .667, 'g_sharp': .63, 'a': .594,
                  'a_sharp': .561, 'b':.53, 'high_c':.5}
    
        # Generate the Sound objects from the dictionary.
        scale = {}
        for key,value in ratio_dict.iteritems():
            smp = resample(sndarray, value,"sinc_fastest").astype(sndarray.dtype)
            # Use the key, currently a string, as a variable
            scale[key] = pygame.sndarray.make_sound(smp)
            
        self.scale_dict=scale
Example #17
    def calculate_attributes(self):
        source = self.source
        freq = self.frequency
        sampling_rate = float(source.sampling_rate)
        fft_sampling_rate = sampling_rate / float(source.fft_step_size)
        window_length = float(source.fft_window_size) / sampling_rate
        # FIXME real time should be passed in as an extra field
        self.start = window_length
        if self.first_frame > 0:
            self.start += (self.first_frame - 1) / fft_sampling_rate
        self.length = window_length
        if len(freq) > 1:
            #			if 'length' not in self.__dict__:
            #				print self.__dict__
            #			print self.__dict__['length']
            self.length += (len(freq) - 1) / fft_sampling_rate


#		print self.length
        self.end = self.start + self.length
        for k in ('frequency', 'amplitude'):
            a = getattr(self, k)
            setattr(self, k + '_min', min(a))
            setattr(self, k + '_max', max(a))
            setattr(self, k + '_mean', sum(a) / len(a))
        freq_window = 2  # seconds
        freq_fft_size = 128
        resampled_freq = resample(freq, freq_window * freq_fft_size /
                                  fft_sampling_rate,
                                  'sinc_fastest')  # FIXME truncate array?
        self.freq_fft = abs(
            rfft(resampled_freq, n=freq_fft_size, overwrite_x=True))[1:]
Example #18
File: train_codebook.py  Project: mwv/hac
def train_codebook(basedirectory,
                   spectral,
                   desired_fs,
                   clfs,
                   n_samples):
    """Train the codebooks.

    Arguments:
    :param basedirectory: root directory of the audio corpus
    :param spectral:
      Spectral feature extraction.
      Object should be picklable and implement the
      \c Spectral abc; i.e. provide a \c transform method.
    :param clfs:
      list of clusterers. valid clusterers have a \c fit method
      and a \c predict method. optionally, for soft vq, also implement
      a \c predict_proba method.
    :param n_samples:
      number of spectral frames to sample from the audio corpus.
    :returns:
      a list of Codebook objects, of same length as the output of spectral_func
    """
    wavs = list(rglob(basedirectory, '*.wav'))
    np.random.shuffle(wavs)

    inds = None
    idx = 0
    X = None
    for i, wav in enumerate(wavs):
        if i % 10 == 0 and i > 0:
            print 'samples: {3}/{4}; loading file: {0} ({1}/{2})'.format(
                wavs[i],
                i+1,
                len(wavs),
                X.shape[0],
                n_samples
            )
        sig, fs, _ = audiolab.wavread(wav)
        start, stop = trim_silence(sig, fs)
        specs = spectral.transform(samplerate.resample(sig[start:stop],
                                                       desired_fs/fs,
                                                       'sinc_best'))
        if inds is None:
            inds = [0] + list(np.cumsum([spec.shape[1] for spec in specs]))
        spec = np.hstack(specs)
        if idx + spec.shape[0] >= n_samples:
            spec = spec[:n_samples - idx, :]
        if X is None:
            X = spec
        else:
            X = np.vstack((X, spec))
        idx += spec.shape[0]
        if idx >= n_samples:
            break

    cdbs = [Codebook(clf) for clf in clfs]
    for i, cdb in enumerate(cdbs):
        cdb.train(X[:, inds[i]:inds[i+1]])
    return cdbs
Example #19
def resample_noise_file(sr_audio, sr_noise, data_noise):
    start1 = time.time()
    if sr_noise != 0:
        resampling_rate = float(sr_audio) / float(sr_noise)
        data_noise = resample(data_noise, resampling_rate, "sinc_best").astype(np.int16)
    end1 = time.time()
    # print("0:", end1-start1)
    return data_noise
Example #20
File: sounds.py  Project: dkiela/thesis
 def resample(self, samplerate, resample_type="sinc_best"):
     """
     Returns a resampled version of the sound.
     """
     if not have_scikits_samplerate:
         raise ImportError("Need scikits.samplerate package for resampling")
     y = array(resample(self, float(samplerate / self.samplerate), resample_type), dtype=float64)
     return Sound(y, samplerate=samplerate)
Example #21
File: gablab.py  Project: coreyker/gablab
def TestGBPDN2():    
    # ________________________________________
    print 'Test: generalized basis pursuit decomposition'

    fs = 8000.
    btmp,fstmp,fmt = audiolab.wavread('glockenspiel.wav')
    b = samplerate.resample(btmp, fs/fstmp,'sinc_best')
    L = len(b)

    A = GaborBlock(L,1024)
    B = GaborBlock(A.M,64)
    C = DictionaryUnion(A,B)
    b = np.hstack((b,np.zeros(C.M-L))) # pad to block boundary
    spow = 10*np.log10(b.conj().dot(b))
    
    # additive white noise
    snr = 15
    nvar = np.sum(np.abs(b)**2)/(10**(snr/10)) # 1e-2    
    n = np.sqrt(nvar)*np.random.randn(C.M)/np.sqrt(C.M)
    
    tonemap = np.reshape(range(A.N),(A.N/A.fftLen,A.fftLen)).transpose()
    transmap = np.reshape(range(B.N),(B.N/B.fftLen,B.fftLen)).transpose()
    
    f,fgrad = BP_factory()
    #f,fgrad = TT_factory(tonemap,transmap)
        
    xe = GBPDN_momentum(C,b+n,f,fgrad,maxerr=nvar,maxits=200,stoptol=1e-3,muinit=1e-1,momentum=0.9,smoothinit=1e-5,anneal=0.96)    
    ye = np.real(C.dot(xe))
    r = b-ye;
    rpow = 10*np.log10(r.conj().dot(r))
                       
    print 'SNR = %f' % (spow-rpow)

    ynoise = np.array(samplerate.resample(b+n,fstmp/fs,'sinc_best'),dtype='float64')
    ydenoise = np.array(samplerate.resample(ye,fstmp/fs,'sinc_best'),dtype='float64')
    
    yetone = np.real(A.dot(xe[:tonemap.size]))
    yetone = np.array(samplerate.resample(yetone,fstmp/fs,'sinc_best'),dtype='float64')

    yetrans = np.real(B.dot(xe[tonemap.size:]))
    yetrans = np.array(samplerate.resample(yetrans,fstmp/fs,'sinc_best'),dtype='float64')
    
                      
    print 'Error (should be <= %f): %f' % (nvar,np.sum((r)**2))
    print '----------------------------------------'
Example #22
def resample(x, fs_old, fs_new, axis=0, algorithm='scipy'):
    """Resample signal

    If available resampling is done using scikit samplerate. Otherwise,
    scipy's FFT-based resample function will be used.

    Converters available in scikits.samplerate:
    - sinc_medium
    - linear
    - sinc_fastest
    - zero_order_hold
    - sinc_best

    """

    if fs_old == fs_new:
        return x

    else:

        ratio = float(fs_new) / fs_old
        if use_scikits_resample and algorithm != 'scipy':

            if algorithm == 'scikits':
                algo = 'sinc_medium'
            else:
                algo = algorithm

            if axis == 0:
                tmp = sks.resample(x, ratio, algo)
            else:
                tmp = sks.resample(x.T, ratio, algo)

            if tmp.dtype != x.dtype:
                tmp = tmp.astype(x.dtype, casting='safe')

            return tmp

        else:
            if axis == 0:
                n_samples_new = int(np.round(x.shape[0] * ratio))
                return signal.resample(x, n_samples_new)
            else:
                n_samples_new = int(np.round(x.shape[1] * ratio))
                return signal.resample(x, n_samples_new, axis=axis)
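A hedged usage sketch for the wrapper above (assumes it is importable as `resample` along with its module-level flags):

import numpy as np

x = np.random.randn(48000).astype(np.float64)       # one second at 48 kHz

y_scipy = resample(x, 48000, 16000, algorithm='scipy')        # force the FFT-based scipy path
y_sinc = resample(x, 48000, 16000, algorithm='sinc_medium')   # libsamplerate path, if installed

print(y_scipy.shape, y_sinc.shape)                   # both roughly (16000,)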
Example #23
 def play(self):
     if have_sk_audiolab and have_sk_samplerate:
         play(
             np.array(resample(self.data, 44100. / self.fs, 'sinc_best'),
                      dtype=np.float64))
     else:
         print(
             'Warning: scikits.audiolab and scikits.samplerate are required to play audiofiles.'
         )
Example #24
def resample(y, orig_sr, target_sr, res_type='sinc_fastest', fix=True,
             **kwargs):
    """Resample a time series from orig_sr to target_sr

    :usage:
        >>> # Downsample from 22 KHz to 8 KHz
        >>> y, sr   = librosa.load('file.wav', sr=22050)
        >>> y_8k    = librosa.resample(y, sr, 8000)

    :parameters:
      - y           : np.ndarray [shape=(n,)]
          audio time series

      - orig_sr     : int > 0 [scalar]
          original sampling rate of ``y``

      - target_sr   : int > 0 [scalar]
          target sampling rate

      - res_type    : str
          resample type (see note)

      - fix         : bool
          adjust the length of the resampled signal to be of size exactly
          ``ceil(target_sr * len(y) / orig_sr)``

      - *kwargs*
          If ``fix==True``, additional keyword arguments to pass to
          :func:`librosa.util.fix_length()`.

    :returns:
      - y_hat       : np.ndarray [shape=(n * target_sr / orig_sr,)]
          ``y`` resampled from ``orig_sr`` to ``target_sr``

    .. note::
        If `scikits.samplerate` is installed, :func:`librosa.core.resample`
        will use ``res_type``.
        Otherwise, it will fall back on `scipy.signal.resample`

    """

    if orig_sr == target_sr:
        return y

    n_samples = int(np.ceil(y.shape[-1] * float(target_sr) / orig_sr))

    if _HAS_SAMPLERATE:
        y_hat = samplerate.resample(y.T,
                                    float(target_sr) / orig_sr,
                                    res_type).T
    else:
        y_hat = scipy.signal.resample(y, n_samples, axis=-1)

    if fix:
        y_hat = util.fix_length(y_hat, n_samples, **kwargs)

    return np.ascontiguousarray(y_hat, dtype=y.dtype)
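A small hedged check of the length guaranteed by `fix=True`, reusing this example's `resample` and its librosa helpers:

import numpy as np

y = np.zeros(22050, dtype=np.float32)                      # one second at 22.05 kHz
y_8k = resample(y, 22050, 8000)                            # fix=True by default
assert len(y_8k) == int(np.ceil(len(y) * 8000 / 22050.0))  # exactly 8000 samples here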
Example #25
File: sounds.py  Project: yzerlaut/brian
 def resample(self, samplerate, resample_type='sinc_best'):
     '''
     Returns a resampled version of the sound.
     '''
     if not have_scikits_samplerate:
         raise ImportError('Need scikits.samplerate package for resampling')
     y = array(resample(self, float(samplerate / self.samplerate), resample_type),
               dtype=float64)
     return Sound(y, samplerate=samplerate)
Example #26
 def set_note(self, note, hstring):
     octave = note[-1:]
     self.note_dict[note]['octave']=octave 
     pure_note = note.strip(octave)
     ratio = self.note_dict[note]['ratio']
     globals()[note] = resample(hstring, ratio, "sinc_best").astype(hstring.dtype)
     globals()[note+"_note"] = self.pygame.sndarray.make_sound(globals()[note])
     globals()[note+"_note"].set_volume(.15)
     self.note_dict[note]['snd_arry']=note+"_note" 
Example #27
 def resample(self, samplerate, resample_type='sinc_best'):
     '''
     Returns a resampled version of the sound.
     '''
     if not have_scikits_samplerate:
         raise ImportError('Need scikits.samplerate package for resampling')
     y = np.array(resample(self, float(samplerate / self.samplerate), resample_type),
               dtype=np.float64)
     return Sound(y, samplerate=samplerate)
Example #28
def rce2wav(rcefile,
            samplerate,
            resample=None,
            split=False,
            splitsuffixes=None):
    """
    a rce file is a raw, float64 file containing pressure level at each frame.

    FORMAT: 
    1 double: number of sources
    Each frame then contains the data for each source, interleaved

    rcefile    : path to the .rce file
    samplerate : samplerate of the simulation (sim.samplerate)
    resample   : INT (new samplerate). 
                 If given, file will be resampled to this sample-rate.

    NOTE:
    an .rce file is simply raw data. To load it: numpy.fromfile(path, dtype=float).
    The samplerate is not saved with the data, but it is the same used by the
    simulation.
    """
    def name_with_suffix(origname, ext='wav', suffix=""):
        if suffix:
            suffix = "-%s" % str(suffix)
        base = os.path.splitext(origname)[0]
        return "%s%s.%s" % (base, suffix, ext)

    raw = rce2array(rcefile)
    from e import audiosample
    if resample is not None:
        try:
            # import under an alias so the module does not shadow the samplerate argument
            from scikits.samplerate import resample as sr_resample
            raw = sr_resample(raw, resample / float(samplerate), 'sinc_fastest')
            samplerate = resample
        except ImportError:
            warnings.warn(
                "resampling not available (install scikits.samplerate to enable). Keeping original samplerate"
            )
    numch = raw.shape[1] if len(raw.shape) > 1 else 1
    if not split or numch == 1:
        out = name_with_suffix(rcefile)
        audiosample.Sample(raw, samplerate).write(out)
        return out
    else:
        outs = []
        if splitsuffixes is None:
            splitsuffixes = [str(i) for i in range(numch)]
        else:
            splitsuffixes = splitsuffixes[:numch]
        for i, suffix in enumerate(splitsuffixes):
            out = name_with_suffix(rcefile, suffix=suffix)
            outs.append(out)
            audiosample.Sample(raw[:, i], samplerate).write(out)
        return outs
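A hedged sketch of reading an .rce file back by hand, following the FORMAT note in the docstring above (the path and layout handling are illustrative only):

import numpy as np

raw = np.fromfile('out.rce', dtype=float)  # placeholder path
nsources = int(raw[0])                     # first double: number of sources
frames = raw[1:]                           # remaining doubles: one value per source, interleaved
data = frames.reshape(-1, nsources)        # one column per source
print(nsources, data.shape)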
Example #29
 def playSound(self, pitch, noteIndex):
     self.getControlVals()
     toneArray = self.soundArrays[self.selectedSound]
     if self.attack != 0:
         changedArray = resample(toneArray*self.attack,
                        pitch,"sinc_fastest").astype(toneArray.dtype)
     else: 
         changedArray = resample(toneArray,
          pitch,"sinc_fastest").astype(toneArray.dtype)#makes correct type
     newSound = pygame.sndarray.make_sound(changedArray)
     newSound.set_volume(self.volume)
     if (self.isRecording and self.songCount < len(self.songs) and
         self.selectedSong != None):
         currentTime = time.time()
         timePlayed = currentTime - self.startTime  
         self.makeRecording(newSound,timePlayed,noteIndex,self.volume,
                         self.length,self.fade,self.isSustained)
     if self.isSustained: self.length = -1
     if self.fade != 0: newSound.play(0,self.length,0).fadeout(self.fade)
     elif self.isSustained: newSound.play(0,-1,0)
     else: newSound.play(0,self.length,0)
Example #30
File: speech.py  Project: trungnt13/blocks
def resample(s, fs, fs_new, algorithm='sinc_best'):
    '''
    sinc_medium : Band limited sinc interpolation, medium quality, 121dB SNR, 90% BW.
    linear : Linear interpolator, very fast, poor quality.
    sinc_fastest : Band limited sinc interpolation, fastest, 97dB SNR, 80% BW.
    zero_order_hold : Zero order hold interpolator, very fast, poor quality.
    sinc_best : Band limited sinc interpolation, best quality, 145dB SNR, 96% BW.
    '''
    from scikits.samplerate import resample
    if fs_new == fs:
        return s
    return resample(s, fs_new / float(fs), algorithm)
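A hedged usage sketch (the converter names listed in the docstring are the ones scikits.samplerate accepts):

import numpy as np

s = np.random.randn(16000)                                 # one second at 16 kHz
s_8k = resample(s, 16000, 8000, algorithm='sinc_fastest')
print(len(s), '->', len(s_8k))                             # roughly half as many samples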
Example #31
def load_song(song_path):
    """
    Loads an individual song, normalizes it, and returns it.
    """
    fs, data = wavfile.read(song_path)
    data = data.mean(axis=1).flatten()
    data = samplerate.resample(data, TARGET_FS / fs, 'linear')

    # Normalize
    data = data / np.max(data) * .95
    print "\t- Loaded %s" % song_path
    return data
Example #32
    def _map(f, n_filters=40, n_ceps=13, fs=8000, downsample='sinc_best',
             win=0.025, shift=0.01, delta_order=2, energy=True, vad=True,
             dtype='float32', get_spec=False, get_mspec=True, get_mfcc=False,
             robust=True):
        '''
        Return
        ------
        (name, features, vad, sum1, sum2)

        '''
        try:
            audio_path, segments = f
            # load audio data
            s, _ = speech.read(audio_path)
            # check frequency for downsampling (if necessary)
            if _ is not None:
                if fs is not None and fs != _:
                    if fs < _: # downsample
                        s = resample(s, fs / _, 'sinc_best')
                    else:
                        raise ValueError('Cannot perform upsample from frequency: '
                                         '{}Hz to {}Hz'.format(_, fs))
                else:
                    fs = _
            N = len(s)
            features = []
            for name, start, end, channel in segments:
                start = int(float(start) * fs)
                end = int(N if end < 0 else end * fs)
                data = s[start:end, channel] if s.ndim > 1 else s[start:end]
                tmp = speech_features_extraction(data.ravel(), fs=fs,
                    n_filters=n_filters, n_ceps=n_ceps,
                    win=win, shift=shift, delta_order=delta_order,
                    energy=energy, vad=vad, dtype=dtype,
                    get_spec=get_spec, get_mspec=get_mspec, get_mfcc=get_mfcc)
                if tmp is not None:
                    features.append((name,) + tmp)
                else:
                    msg = 'Ignore segments: %s, error: NaN values' % name
                    warnings.warn(msg)
            return features
        except Exception, e:
            msg = 'Ignore file: %s, error: %s' % (f[0], str(e))
            warnings.warn(msg)
            if robust:
                return None
            else:
                import traceback; traceback.print_exc()
                raise e
Example #33
 def resample_data(self):
     for i_series in xrange(8):
         X_series = np.array(self.train_X_series[i_series]).astype(np.float32)
         X_series = resample(X_series, 250.0/500.0, 'sinc_fastest')
         self.train_X_series[i_series] = X_series
         y_series = np.array(self.train_y_series[i_series]).astype(np.int32)
         # take later predictions ->
         # shift all predictions backwards compared to data.
         # this ensures you are not using data from the future to make a prediciton
         # rather in a bad case maybe you do not even have all data up to the sample
         # to make the prediction
         y_series = y_series[1::2]
         # maybe some border effects remove predictions
         y_series = y_series[-len(X_series):]
         self.train_y_series[i_series] = y_series
Example #34
def test_mono():
    fs = 16000.
    fr = 8000.

    f0 = 440.

    # Create a small mono test signal
    x = np.sin(2 * np.pi * f0/fs * np.arange(0, 2 * fs))

    # Downsampled reference, generated directly at the target rate
    z_r = np.sin(2 * np.pi * f0/fr * np.arange(0, 2 * fr))

    z = resample(x, fr /fs, 'sinc_best')

    assert np.max(np.abs(z_r[10:-1] - z[10:])) < 1e-2
Example #35
 def resample_data(self):
     for i_series in xrange(8):
         X_series = np.array(self.train_X_series[i_series]).astype(
             np.float32)
         X_series = resample(X_series, 250.0 / 500.0, 'sinc_fastest')
         self.train_X_series[i_series] = X_series
         y_series = np.array(self.train_y_series[i_series]).astype(np.int32)
         # take later predictions ->
         # shift all predictions backwards compared to data.
         # this ensures you are not using data from the future to make a prediciton
         # rather in a bad case maybe you do not even have all data up to the sample
         # to make the prediction
         y_series = y_series[1::2]
         # maybe some border effects remove predictions
         y_series = y_series[-len(X_series):]
         self.train_y_series[i_series] = y_series
Example #36
def create_specgram_image_of_audio_file(fid):
    
    # Read wav file
    wave_obj = wave.open(fid, 'r')

    # Vectorize the wave file
    sample_vec = np.fromstring(wave_obj.readframes(wave_obj.getnframes()), np.short)/32780.0
    sample_rate = wave_obj.getframerate()  # get sample rate
    wave_obj.close()

    # Re-sample the vector to 22050 Hz
    if not sample_rate == 22050:
        sample_vec = resample(sample_vec, FS/sample_rate, 'sinc_best')
    
    num_of_samples = len(sample_vec)
    
    # Normalize the vector
    sample_vec = 0.9*sample_vec/sample_vec.max()

    # Create spectrogram
    # the window size (FFT size) is 512, and FFT HOP size is 128 -> 128/512 = 0.25 meaning a 75% overlap
    num_of_specgram_frames = int((num_of_samples-FFT_SIZE)/FFT_HOP_SIZE) + 1

    # Hanning window function
    window = 0.5 * (1.0 - np.cos(np.array(range(FFT_SIZE))*2.0*np.pi/(FFT_SIZE-1)))

    # Apply STFT
    specgram = []
    for j in range(num_of_specgram_frames):
        vec = sample_vec[j*FFT_HOP_SIZE: j*FFT_HOP_SIZE + FFT_SIZE] * window
        real_fft = np.fft.rfft(vec, FFT_SIZE)
        real_fft = abs(real_fft[:int(FFT_SIZE/2)])
        specgram.append(real_fft)

    # Create spectrogram image
    specgram_transposed = np.transpose(np.array(specgram))
    specgram_image = np.zeros_like(specgram_transposed)

    for i in range(specgram_transposed.shape[0]):
        specgram_image[i] = specgram_transposed[-i - 1]
    
    if np.max(specgram_image) <= 0.0:
        print('Problem: np.max(specgram_image) <= 0.0')
    else:
        specgram_image /= np.max(specgram_image)
    
    return specgram_image
Example #37
File: gablab.py  Project: coreyker/gablab
def TestBPDN2():
    # ________________________________________
    print 'Test: basis pursuit decomposition'

    fs = 8000
    btmp = audiolab.wavread('glockenspiel.wav')[0]
    b = samplerate.resample(btmp, fs/44100.,'sinc_best')
    L = len(b)

    A = GaborBlock(L,1024)
    B = GaborBlock(A.M,64)
    C = DictionaryUnion(A,B)
    b = np.hstack((b,np.zeros(C.M-L)))
    
    e = 1e-2
    x = BPDN(C,b,e,100)
    ye = np.real(C.dot(x))
    print 'Error (should be <= %f): %f' % (e,np.sum((b-ye)**2))
    print '----------------------------------------'
    
    xtone = x[:A.N]
    xtrans = x[A.N:]
    ytone = np.real(A.dot(xtone))
    ytrans = np.real(B.dot(xtrans))

    audiolab.wavwrite(ytone,'ytone.wav',fs)
    audiolab.wavwrite(ytrans,'ytrans.wav',fs)

    # tonal decomp
    m = np.log10(np.abs(A.conj().transpose().dot(ytone)))
    tfgrid = np.reshape(range(0,A.N),(A.N/A.fftLen,A.fftLen))
    tfgrid = tfgrid[:,:A.fftLen/2+1]

    pyplot.subplot(2,1,1)
    pyplot.imshow(m[tfgrid].transpose(), aspect='auto', interpolation='bilinear', origin='lower')

    # transient decomp
    m = np.log10(np.abs(B.conj().transpose().dot(ytrans)))
    tfgrid = np.reshape(range(0,B.N),(B.N/B.fftLen,B.fftLen))
    tfgrid = tfgrid[:,:B.fftLen/2+1]

    pyplot.subplot(2,1,2)
    pyplot.imshow(m[tfgrid].transpose(), aspect='auto', interpolation='bilinear', origin='lower')

    pyplot.show()
Example #38
def resample_audio(samples, current_sample_rate, new_sample_rate):
    '''Resamples audio samples

    Given an audio sample and the current sample rate, resample to a new
    sample rate.

    Args:
        samples: The audio samples, i.e. wavfile.read(<file>)[1]
        current_sample_rate: The current sample rate, i.e. wavfile.read(<file>)[0]
        new_sample_rate: The new sample rate

    Returns:
        A tuple of the new sample rate and the resampled audio samples, in
        the same (rate, data) order returned by wavfile.read(<filename>)
    '''
    return new_sample_rate, resample(samples,
                                     new_sample_rate / current_sample_rate,
                                     'sinc_best')
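A hedged usage sketch mirroring the wavfile convention described in the docstring (file names are placeholders; assumes scipy.io.wavfile):

from scipy.io import wavfile

rate, samples = wavfile.read('speech.wav')                            # placeholder input file
new_rate, new_samples = resample_audio(samples, float(rate), 16000.0)
wavfile.write('speech_16k.wav', int(new_rate), new_samples)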
Example #39
File: core.py  Project: wnstlr/librosa
def resample(y, orig_sr, target_sr, res_type='sinc_fastest'):
    """Resample a time series from orig_sr to target_sr

    :usage:
        >>> # Downsample from 22 KHz to 8 KHz
        >>> y, sr   = librosa.load('file.wav', sr=22050)
        >>> y_8k    = librosa.resample(y, sr, 8000)

    :parameters:
      - y           : np.ndarray
          audio time series

      - orig_sr     : int
          original sampling rate of ``y``

      - target_sr   : int
          target sampling rate

      - res_type    : str
          resample type (see note)

    :returns:
      - y_hat       : np.ndarray
          ``y`` resampled from ``orig_sr`` to ``target_sr``

    .. note::
        If scikits.samplerate is installed, resample will use res_type
        otherwise, it will fall back on scipy.signal.resample

    """

    if orig_sr == target_sr:
        return y

    if _HAS_SAMPLERATE:
        y_hat = samplerate.resample(y.T,
                                    float(target_sr) / orig_sr,
                                    res_type).T
    else:
        n_samples = y.shape[-1] * target_sr / orig_sr
        y_hat = scipy.signal.resample(y, n_samples, axis=-1)

    return np.ascontiguousarray(y_hat)
Example #40
    def resample(self, target_fs, mode="sinc_best"):
        """
        Resample the signal. Because the output must contain a whole number of samples,
        the resulting sampling frequency is not guaranteed to be exactly `target_fs`.
        This method wraps :func:`~samplerate.resample`

        :param target_fs: The new sampling frequency
        :type target_fs: float
        :param mode: to be passed to :func:`~scikits.samplerate.resample`
        :type mode: str
        :return:
        """
        # num = target_fs * self.size / self.fs
        ratio = target_fs / self.fs

        out = resample(self,ratio,mode)
        real_fs = out.size * self.fs / float(self.size)
        out = self._copy_attrs_to_array(out, fs=real_fs)
        return out
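A small numeric illustration of the caveat in the docstring above (the achieved rate may differ slightly from `target_fs`; values are made up):

size, fs, target_fs = 1001, 44100.0, 16000.0
out_size = int(round(size * target_fs / fs))  # roughly what the converter produces: 363 samples
real_fs = out_size * fs / float(size)         # ~15992.3 Hz rather than exactly 16000
print(out_size, real_fs)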
Example #41
def load_wav(filename):
    '''
    Load a WAV file, mix its channels down to one, normalize, and resample to 16384 Hz.
    '''
    f = wave.open(filename, "rb")  # assumes the standard-library wave module is imported

    nchannels, samplewidth, framerate, nframes,\
        _, _ = f.getparams()
    frames = f.readframes(nframes)
    f.close()

    if samplewidth == 1:
        dtype = np.uint8
        maxvalue = 2 ** 8
    elif samplewidth == 2:
        dtype = np.int16
        maxvalue = 2 ** (16 - 1)
    elif samplewidth == 4:
        dtype = np.int32
        maxvalue = 2 ** (32 - 1)
    else:
        print "Error: unsupported samplewidth {}".format(samplewidth)
        return None

    all_samples = np.fromstring(frames, dtype)

    combined_samples = np.zeros(nframes)

    for i in xrange(nchannels):
        a = combined_samples
        b = all_samples[i::nchannels]
        condition = np.less(np.abs(a), np.abs(b))
        combined_samples = np.choose(condition, [a, b])

    normalized_samples = combined_samples / maxvalue
    if dtype == np.uint8:
        normalized_samples = 2 * normalized_samples - 1

    desired_framerate = 16384.
    resampled_samples = resample(normalized_samples,
                                 desired_framerate / framerate, 'sinc_best')

    return Signal(resampled_samples, desired_framerate, filename)
Example #42
    def generate_scale(self):
        """
        Given the initial note, middle C, create the rest of the musical scale by
        resampling.
    
        Returns: Dictionary of musical scale with the key being the name of the note
        and the value being the corresponding sound object.
    
        """
        pygame.mixer.init()

        wav = util.get_app_path() + "res/piano-c.wav"
        sound = pygame.mixer.Sound(wav)

        pygame.mixer.set_num_channels(32)
        sndarray = pygame.sndarray.array(sound)

        ratio_dict = {
            'low_c': 1,
            'c_sharp': .944,
            'd': .891,
            'd_sharp': .841,
            'e': .794,
            'f': .749,
            'f_sharp': .707,
            'g': .667,
            'g_sharp': .63,
            'a': .594,
            'a_sharp': .561,
            'b': .53,
            'high_c': .5
        }

        # Generate the Sound objects from the dictionary.
        scale = {}
        for key, value in ratio_dict.iteritems():
            smp = resample(sndarray, value,
                           "sinc_fastest").astype(sndarray.dtype)
            # Use the key, currently a string, as a variable
            scale[key] = pygame.sndarray.make_sound(smp)

        self.scale_dict = scale
Example #43
def resample(y, orig_sr, target_sr, fix=True, scale=False):
    """Resample a time series from orig_sr to target_sr
    Args:
        y : np.ndarray [shape=(n,) or shape=(2, n)]
            audio time series.  Can be mono or stereo.
        orig_sr : number > 0 [scalar]
            original sampling rate of `y`
        target_sr : number > 0 [scalar]
            target sampling rate
        fix : bool
            adjust the length of the resampled signal to be of size exactly
            `ceil(target_sr * len(y) / orig_sr)`
        scale : bool
            Scale the resampled signal so that `y` and `y_hat` have
            approximately equal total energy.
    Returns
        y_hat : np.ndarray [shape=(n * target_sr / orig_sr,)]
            `y` resampled from `orig_sr` to `target_sr`
    """
    if y.ndim > 1:
        return np.vstack([resample(yi, orig_sr, target_sr, fix=fix, scale=scale)
                          for yi in y])
    valid_audio(y, mono=True)
    if orig_sr == target_sr:
        return y
    ratio = float(target_sr) / orig_sr
    n_samples = int(np.ceil(y.shape[-1] * ratio))
    try:
        # Try to use scikits.samplerate if available
        import scikits.samplerate as samplerate
        y_hat = samplerate.resample(y.T, ratio, 'sinc_best').T
    except ImportError:
        warnings.warn('Could not import scikits.samplerate. '
                      'Falling back to scipy.signal')
        import scipy.signal
        y_hat = scipy.signal.resample(y, n_samples, axis=-1)
    if fix:
        y_hat = fix_length(y_hat, n_samples)
    if scale:
        y_hat /= np.sqrt(ratio)
    return np.ascontiguousarray(y_hat, dtype=y.dtype)
Example #44
def resample(y, orig_sr, target_sr, res_type='sinc_fastest'):
    """Resample a signal from orig_sr to target_sr

    :usage:
        >>> # Downsample from 22 KHz to 8 KHz
        >>> y, sr   = librosa.load('file.wav', sr=22050)
        >>> y_8k    = librosa.resample(y, sr, 8000)

    :parameters:
      - y           : np.ndarray
          audio time series 

      - orig_sr     : int
          original sampling rate of ``y``

      - target_sr   : int
          target sampling rate

      - res_type    : str
          resample type (see note)
    
    :returns:
      - y_hat       : np.ndarray
          ``y`` resampled from ``orig_sr`` to ``target_sr``

    .. note::
        If scikits.samplerate is installed, resample will use res_type
        otherwise, it will fall back on scipy.signal.resample

    """

    if orig_sr == target_sr:
        return y

    if _HAS_SAMPLERATE:
        y_hat = samplerate.resample(y.T, float(target_sr) / orig_sr, res_type).T
    else:
        n_samples = y.shape[-1] * target_sr / orig_sr
        y_hat = scipy.signal.resample(y, n_samples, axis=-1)

    return y_hat
Example #45
 def prepareData(self):
     if self.newSampleRate is not None:
         resamplingFactor = float(self.newSampleRate)/float(self.sampleRate)
     elif self.filterb is None:
         print("skipping data preparation, nothing to do")
         return
     
     newChannels = [None for x in xrange(len(self.channels))]
     for i, channel in itertools.izip(xrange(len(self.channels)), self.channels):
         print("channel {0}".format(i))
         newData = channel
         if self.filterb is not None:
             print("  lowpass")
             newData = signal.lfilter(self.filterb, self.filtera, channel)
         if self.newSampleRate is not None:
             print("  resampling")
             newData = samplerate.resample(numpy.array(channel), resamplingFactor, 'sinc_best' if self.filterb is None else 'sinc_medium')
         newChannels[i] = newData
     self.channels = newChannels
     if self.newSampleRate is not None:
         self.sampleRate = self.newSampleRate
Example #46
def test_multi_channel():
    fs = 16000.
    fr = 8000.

    f0 = 440.

    # Create a small stereo audio array with dephased channel
    xleft = np.sin(2 * np.pi * f0/fs * np.arange(0, 2 * fs))
    xright = np.cos(2 * np.pi * f0/fs * np.arange(0, 2 * fs))
    y = np.empty((xleft.size, 2), np.float)
    y[:,0] = xleft
    y[:,1] = xright

    # Downsampled reference, generated directly at the target rate
    z_rleft = np.sin(2 * np.pi * f0/fr * np.arange(0, 2 * fr))
    z_rright = np.cos(2 * np.pi * f0/fr * np.arange(0, 2 * fr))
    z_r = np.vstack((z_rleft, z_rright)).T

    z = resample(y, fr /fs, 'sinc_best')

    assert np.max(np.abs(z_r[100:-100] - z[100:-99])) < 1e-2
Example #47
def main(dbFilename, targetFs, force=False):
	util.createDirectory(NormalizeDir)

	rirDb = json.load(open(dbFilename))

	bar = util.ConsoleProgressBar()
	bar.start('Normalize RIRs')
	i = 0
	for rirId, rir in rirDb.items():
		targetFilename = os.path.join(NormalizeDir, rir['id'] + '.wav')
		if not force:
			if rir['filename'] == targetFilename and \
				rir['fs'] == targetFs and \
				targetFilename:
				continue

		x, fs_x = sf.read(os.path.join(ImportDir, rir['id'] + '.wav'), dtype='float32')
		y, fs_y = x, fs_x

		if fs_y != targetFs:
			y = resample(y, targetFs / fs_y, 'sinc_best')
			fs_y = targetFs

		rir['length_org'] = len(y) / fs_y
		y = util.trimSilence(y, 0.001, trimRight=False)
		y = util.normalizeAmplitude(y)

		sf.write(targetFilename, y, fs_y)
		
		rir['filename'] = targetFilename
		rir['fs'] = fs_y
		rir['length'] = len(y) / fs_y

		i += 1
		bar.progress(i / len(rirDb))
	bar.end()

	with open(dbFilename, 'w') as dbFile:
		json.dump(rirDb, dbFile, sort_keys=True, indent=4)
Example #48
    def resample(self):
        plotBA = False
        if type(self.original) is list:
            self.original = numpy.asarray(self.original)
        if self.logs :
            self.logs.write("Rec.py : resampling recording")

        aa,b,c=mlab.specgram(self.original,NFFT=256,Fs=self.sample_rate)

        to_sample = self.calc_resample_factor()
        self.original   = resample(self.original, float(to_sample)/float(self.sample_rate) , 'sinc_best')
        if plotBA:
            a,b,c=mlab.specgram(self.original,NFFT=256,Fs=to_sample)
            figure(figsize=(25,15))
            subplot(211)
            imshow(20*log10(numpy.flipud(aa)), interpolation='nearest', aspect='auto')
            subplot(212)
            imshow(20*log10(numpy.flipud(a)),interpolation='nearest', aspect='auto')
            savefig(''+self.filename+'.png', dpi=100)
            close()
        self.samples = len(self.original)
        self.sample_rate = to_sample
Example #49
def analyze(sig, samplerate=8000, resample_to=8000):
    e_min_scale = 0.3
    e_min_distance = 1
    e_a_scale = 0.5
    e_W_ms = 2000
    min_len = 30
    if samplerate != resample_to:
        print("resampling")
        sig = srate.resample(sig, resample_to / samplerate, 'sinc_best')

    frames, frame_size = ad.frames(sig, samplerate, 64)
    #ac_peaks = [ad.ac_peaks(frame) for frame in frames]
    #energy = ad.energies(frames, ...)
    #energy = log_energy
    print("getting normalized spectra")
    normalized_spectrum = ad.normalized_spectrum(frames, samplerate)
    print("spectral entropy")
    spectral_entropy = np.fromiter(
        (ad.entropy(frame) for frame in normalized_spectrum), dtype=np.float64)
    se_t = np.percentile(spectral_entropy, 80)
    se_segments = ad.segments_to_seconds(
        ad.entropy_segment_indexes(spectral_entropy, se_t))
    print("energy based computations")
    energy, smooth, en_a, en_t, en_lmin = ad.energy_thresholds(
        sig,
        noise_dist=e_min_distance,
        a_scale=e_a_scale,
        min_scale=e_min_scale,
        W_ms=e_W_ms)
    energy_segments = ad.get_voice_segments(
        smooth, en_t, ad.get_segment_indexes(smooth, en_t, min_len=10))
    #energy_t = ad.thresholds(frames)
    #entropy_t = ad.entropy_t(frames)
    #energy_indexes = ad.get_segment_indexes(x, t, min_len=30)
    #entropy_indexes = ad.get_entropy_indexes(x, t, min_len=30)
    #combination_indexes = ad.get_combined_indexes(x, t, min_len=30)
    return spectral_entropy, se_t, se_segments, energy, en_t, energy_segments
Example #50
File: aucoder.py  Project: turian/aucoder
def perform_mfcc_on_filename(filename, opts):
    (samplerate, sig) = read_audio_to_numpy(filename)
    opts['samplerate'] = samplerate
    if sig.ndim > 1:
        # Mix to mono
        # TODO: Multi-channel
        nchannels = sig.shape[1]
        sig = n.mean(sig, axis=1)
    else:
        nchannels = 1
    print "Read %s with sample rate %s, #channels = %d" % (filename, samplerate, nchannels)
    
    if (samplerate != desired_samplerate and not FORCE_RESAMPLE):
        print "%s has the wrong samplerate, ignoring" % filename
        return None

    if (samplerate != desired_samplerate and FORCE_RESAMPLE):
        origsig = sig
        sig = resample(origsig, 1.0 * desired_samplerate/samplerate, 'sinc_best')
        print "Resampled file from rate %d to rate %d, shape %s to %s" % (samplerate, desired_samplerate, origsig.shape, sig.shape)

    mfcc_feat = mfcc(sig, **opts)
    print "Computed mfccs on %s" % filename
    return mfcc_feat
Example #51
def batchChordGenerate(taskList, prefix):
    '''
    inputs: a list of lists
    each sublist contains the following values @ indices:
        0 : vox collection index
        1 : vox sample index
        2 : a list: [sample #, transposition semitones, transposition cents]
        3 : ... and so on ...
    '''
    for i, t in enumerate(taskList):
        print "begin task for vocal gesture " + p(t[0]) + '_' + p(t[1])
        # grab the vocal gesture:
        gestur = vox[t[0]][t[1]]
        # compute clicks to be applied in all files:
        idx, l = clicks(gestur)
        clk = [idx, l]
        # compute envelope to be applied to all files:
        env = envelope(gestur, 10, 30)
        env *= (1 / env.max())  # normalize envelope
        # proceed with computing:
        fp = prefix + '_' + p(i) + '__' + p(t[0]) + '_' + p(t[1]) + '__'
        sr = None
        enc = None
        for j in xrange(2, len(t)):
            fname = fp + str(j - 2) + '__' + str(t[j][0]) + '.wav'
            print "    computing " + str(j) + ' : ' + fname
            transpratio = tr(t[j][1], t[j][2])
            # print "    transposition : " + str(t[j][1]) + ' ' + str(t[j][2]) + ' = ' + str(transpratio)
            rezs = []  # array of np.arrays containing transposed resonances
            for f in coll_safe[t[j][0]]:
                # print "         file : " + f
                x, sr, enc = wavread(f)
                rezs.append(resample(x, transpratio, 'sinc_best'))
            wavwrite(resonancesChord(None, rezs, 30, clk, env), fname, 44100,
                     'pcm24')
    return
Example #52
def preprocess(db, stations, comps, goal_day, params, responses=None):
    """
    Fetches data for each ``stations`` and each ``comps`` using the
    data_availability table in the database.

    To correct for instrument responses, make sure to set ``remove_response``
    to "Y" in the config and to provide the ``responses`` DataFrame.

    :Example:
    >>> from msnoise.api import connect, get_params, preload_instrument_responses
    >>> from msnoise.preprocessing import preprocess
    >>> db = connect()
    >>> params = get_params(db)
    >>> responses = preload_instrument_responses(db)
    >>> st = preprocess(db, ["YA.UV06","YA.UV10"], ["Z",], "2010-09-01", params, responses)
    >>> st
     2 Trace(s) in Stream:
    YA.UV06.00.HHZ | 2010-09-01T00:00:00.000000Z - 2010-09-01T23:59:59.950000Z | 20.0 Hz, 1728000 samples
    YA.UV10.00.HHZ | 2010-09-01T00:00:00.000000Z - 2010-09-01T23:59:59.950000Z | 20.0 Hz, 1728000 samples

    :type db: :class:`sqlalchemy.orm.session.Session`
    :param db: A :class:`~sqlalchemy.orm.session.Session` object, as
        obtained by :func:`msnoise.api.connect`.
    :type stations: list of str
    :param stations: a list of station names, in the format NET.STA.
    :type comps: list of str
    :param comps: a list of component names, in Z,N,E,1,2.
    :type goal_day: str
    :param goal_day: the day of data to load, ISO 8601 format: e.g. 2016-12-31.
    :type params: class
    :param params: an object containing the config parameters, as obtained by
        :func:`msnoise.api.get_params`.
    :type responses: :class:`pandas.DataFrame`
    :param responses: a DataFrame containing the instrument responses, as
        obtained by :func:`msnoise.api.preload_instrument_responses`.
    :rtype: :class:`obspy.core.stream.Stream`
    :return: A Stream object containing all traces.
    """
    datafiles = {}
    output = Stream()
    for station in stations:
        datafiles[station] = {}
        net, sta = station.split('.')
        gd = datetime.datetime.strptime(goal_day, '%Y-%m-%d')
        files = get_data_availability(db,
                                      net=net,
                                      sta=sta,
                                      starttime=gd,
                                      endtime=gd)
        for comp in comps:
            datafiles[station][comp] = []
        for file in files:
            if file.comp[-1] not in comps:
                continue
            fullpath = os.path.join(file.path, file.file)
            datafiles[station][file.comp[-1]].append(fullpath)

    for istation, station in enumerate(stations):
        net, sta = station.split(".")
        for comp in comps:
            files = datafiles[station][comp]
            if len(files) != 0:
                logging.debug("%s.%s Reading %i Files" %
                              (station, comp, len(files)))
                stream = Stream()
                for file in sorted(files):
                    try:
                        st = read(file,
                                  dtype=np.float,
                                  starttime=UTCDateTime(gd),
                                  endtime=UTCDateTime(gd) + 86400)
                    except:
                        logging.debug("ERROR reading file %s" % file)
                        continue
                    for tr in st:
                        if len(tr.stats.channel) == 2:
                            tr.stats.channel += tr.stats.location
                            tr.stats.location = "00"
                    tmp = st.select(network=net, station=sta, component=comp)
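                    # if the expected network code is not found in the file,
                    # force it onto every trace and select again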
                    if not len(tmp):
                        for tr in st:
                            tr.stats.network = net
                        st = st.select(network=net,
                                       station=sta,
                                       component=comp)
                    else:
                        st = tmp
                    for tr in st:
                        tr.data = tr.data.astype(np.float)
                        tr.stats.network = tr.stats.network.upper()
                        tr.stats.station = tr.stats.station.upper()
                        tr.stats.channel = tr.stats.channel.upper()

                    stream += st
                    del st
                stream.sort()
                try:
                    # HACK: not very clean... should find a way to prevent the
                    # same trace id from occurring with different sampling rates
                    stream.merge(method=1,
                                 interpolation_samples=3,
                                 fill_value=None)
                except:
                    logging.debug("Error while merging...")
                    traceback.print_exc()
                    continue
                stream = stream.split()
                if not len(stream):
                    continue
                logging.debug("%s Checking sample alignment" % stream[0].id)
                for i, trace in enumerate(stream):
                    stream[i] = check_and_phase_shift(trace)

                logging.debug("%s Checking Gaps" % stream[0].id)
                if len(getGaps(stream)) > 0:
                    max_gap = params.preprocess_max_gap * stream[
                        0].stats.sampling_rate
                    only_too_long = False
                    while getGaps(stream) and not only_too_long:
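                        # repeatedly merge the pair of traces around each gap no
                        # larger than max_gap, interpolating across it; stop once
                        # only gaps that are too long remain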
                        too_long = 0
                        gaps = getGaps(stream)
                        for gap in gaps:
                            if int(gap[-1]) <= max_gap:
                                try:
                                    stream[gap[0]] = stream[gap[0]].__add__(
                                        stream[gap[1]],
                                        method=1,
                                        fill_value="interpolate")
                                    stream.remove(stream[gap[1]])
                                except:
                                    stream.remove(stream[gap[1]])

                                break
                            else:
                                too_long += 1
                        if too_long == len(gaps):
                            only_too_long = True

                stream = stream.split()
                for tr in stream:
                    if tr.stats.sampling_rate < (params.goal_sampling_rate -
                                                 1):
                        stream.remove(tr)
                taper_length = 20.0  # seconds
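                # traces shorter than 4 * taper_length (80 s here) are dropped;
                # the remaining ones are demeaned, detrended and tapered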
                for trace in stream:
                    if trace.stats.npts < 4 * taper_length * trace.stats.sampling_rate:
                        stream.remove(trace)
                    else:
                        trace.detrend(type="demean")
                        trace.detrend(type="linear")
                        trace.taper(max_percentage=None, max_length=1.0)

                if not len(stream):
                    logging.debug(" has only too small traces, skipping...")
                    continue

                for trace in stream:
                    logging.debug("%s Highpass at %.2f Hz" %
                                  (trace.id, params.preprocess_highpass))
                    trace.filter("highpass",
                                 freq=params.preprocess_highpass,
                                 zerophase=True)

                    if trace.stats.sampling_rate != params.goal_sampling_rate:
                        logging.debug("%s Lowpass at %.2f Hz" %
                                      (trace.id, params.preprocess_lowpass))
                        trace.filter("lowpass",
                                     freq=params.preprocess_lowpass,
                                     zerophase=True,
                                     corners=8)

                        if params.resampling_method == "Resample":
                            logging.debug(
                                "%s Downsample to %.1f Hz" %
                                (trace.id, params.goal_sampling_rate))
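                            # the second argument to resample is the ratio
                            # goal_sampling_rate / current sampling rate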
                            trace.data = resample(
                                trace.data, params.goal_sampling_rate /
                                trace.stats.sampling_rate, 'sinc_fastest')

                        elif params.resampling_method == "Decimate":
                            decimation_factor = trace.stats.sampling_rate / params.goal_sampling_rate
                            if not int(decimation_factor) == decimation_factor:
                                logging.warning(
                                    "%s CANNOT be decimated by an integer factor, consider using Resample or Lanczos methods"
                                    " Trace sampling rate = %i ; Desired CC sampling rate = %i"
                                    % (trace.id, trace.stats.sampling_rate,
                                       params.goal_sampling_rate))
                                sys.stdout.flush()
                                sys.exit()
                            logging.debug("%s Decimate by a factor of %i" %
                                          (trace.id, decimation_factor))
                            trace.data = trace.data[::int(decimation_factor)]

                        elif params.resampling_method == "Lanczos":
                            logging.debug(
                                "%s Downsample to %.1f Hz" %
                                (trace.id, params.goal_sampling_rate))
                            trace.data = np.array(trace.data)
                            trace.interpolate(
                                method="lanczos",
                                sampling_rate=params.goal_sampling_rate,
                                a=1.0)

                        trace.stats.sampling_rate = params.goal_sampling_rate

                if params.remove_response:
                    logging.debug('%s Removing instrument response' %
                                  stream[0].id)

                    response = responses[responses["channel_id"] ==
                                         stream[0].id]
                    if len(response) > 1:
                        response = response[
                            response["start_date"] <= UTCDateTime(gd)]
                    if len(response) > 1:
                        response = response[
                            response["end_date"] >= UTCDateTime(gd)]
                    elif len(response) == 0:
                        logging.info("No instrument response information "
                                     "for %s, skipping" % stream[0].id)
                        continue
                    try:
                        datalesspz = response["paz"].values[0]
                    except:
                        logging.error("Bad instrument response information "
                                      "for %s, skipping" % stream[0].id)
                        continue
                    stream.simulate(
                        paz_remove=datalesspz,
                        remove_sensitivity=True,
                        pre_filt=params.response_prefilt,
                        paz_simulate=None,
                    )
                for tr in stream:
                    tr.data = tr.data.astype(np.float32)
                output += stream
                del stream
            del files
    clean_scipy_cache()
    return output
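
The "Decimate" branch above only keeps every Nth sample, which is valid only when the current rate is an integer multiple of the target rate (the lowpass applied just before it acts as the anti-alias filter). A minimal standalone sketch of that check, with hypothetical rates, might look like:

def can_decimate(current_sr, goal_sr):
    # decimation keeps every Nth sample, so the rate ratio must be an integer
    factor = current_sr / goal_sr
    return int(factor) == factor, int(factor)

ok, factor = can_decimate(100.0, 20.0)   # (True, 5)  -> trace.data[::5]
ok, factor = can_decimate(100.0, 40.0)   # (False, 2) -> warn and use Resample/Lanczos instead
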
Example #53
def _downsample_and_upsample(data):
    ds = numpy.array([resample(x, 0.5, 'sinc_best') for x in data[0]])
    us = numpy.array([resample(x, 2, 'sinc_best') for x in ds])
    return (us, )
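
Example #53 halves the bandwidth of each signal by resampling down by a factor of 2 and back up by 2. A minimal, hypothetical usage sketch (assuming resample here is scikits.samplerate.resample, as in the examples above) is:

import numpy

# hypothetical input: one second of a 50 Hz tone at 44.1 kHz
sr = 44100
x = numpy.sin(2 * numpy.pi * 50 * numpy.arange(sr) / float(sr))

(us,) = _downsample_and_upsample(([x],))
lowpassed = us[0]
# the round trip keeps the length (up to rounding) but removes content above
# a quarter of the original sample rate, i.e. the Nyquist of the halved rate
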
Example #54
def preprocess(db, stations, comps, goal_day, params, responses=None):

    datafiles = {}
    output = Stream()
    for station in stations:
        datafiles[station] = {}
        net, sta = station.split('.')
        gd = datetime.datetime.strptime(goal_day, '%Y-%m-%d')
        files = get_data_availability(db,
                                      net=net,
                                      sta=sta,
                                      starttime=gd,
                                      endtime=gd)
        for comp in comps:
            datafiles[station][comp] = []
        for file in files:
            if file.comp[-1] not in comps:
                continue
            fullpath = os.path.join(file.path, file.file)
            datafiles[station][file.comp[-1]].append(fullpath)

    for istation, station in enumerate(stations):
        net, sta = station.split(".")
        for comp in comps:
            files = datafiles[station][comp]
            if len(files) != 0:
                logging.debug("%s.%s Reading %i Files" %
                              (station, comp, len(files)))
                stream = Stream()
                for file in sorted(files):
                    st = read(file,
                              dtype=np.float,
                              starttime=UTCDateTime(gd),
                              endtime=UTCDateTime(gd) + 86400)
                    tmp = st.select(network=net, station=sta, component=comp)
                    if not len(tmp):
                        for tr in st:
                            tr.stats.network = net
                        st = st.select(network=net,
                                       station=sta,
                                       component=comp)
                    else:
                        st = tmp
                    for tr in st:
                        tr.data = tr.data.astype(np.float)
                    stream += st
                    del st
                stream.sort()
                stream.merge(method=1,
                             interpolation_samples=3,
                             fill_value=None)
                stream = stream.split()

                logging.debug("Checking sample alignment")
                for i, trace in enumerate(stream):
                    stream[i] = check_and_phase_shift(trace)

                logging.debug("Checking Gaps")
                if len(getGaps(stream)) > 0:
                    max_gap = 10
                    only_too_long = False
                    while getGaps(stream) and not only_too_long:
                        too_long = 0
                        gaps = getGaps(stream)
                        for gap in gaps:
                            if int(gap[-1]) <= max_gap:
                                stream[gap[0]] = stream[gap[0]].__add__(
                                    stream[gap[1]],
                                    method=1,
                                    fill_value="interpolate")
                                stream.remove(stream[gap[1]])
                                break
                            else:
                                too_long += 1
                        if too_long == len(gaps):
                            only_too_long = True
                stream = stream.split()
                taper_length = 20.0  # seconds
                for trace in stream:
                    if trace.stats.npts < 4 * taper_length * trace.stats.sampling_rate:
                        stream.remove(trace)
                    else:
                        trace.detrend(type="demean")
                        trace.detrend(type="linear")
                        trace.taper(max_percentage=None, max_length=1.0)

                if not len(stream):
                    logging.debug(" has only too small traces, skipping...")
                    continue

                for trace in stream:
                    logging.debug("%s.%s Highpass at %.2f Hz" %
                                  (station, comp, params.preprocess_highpass))
                    trace.filter("highpass",
                                 freq=params.preprocess_highpass,
                                 zerophase=True)

                    if trace.stats.sampling_rate != params.goal_sampling_rate:
                        logging.debug(
                            "%s.%s Lowpass at %.2f Hz" %
                            (station, comp, params.preprocess_lowpass))
                        trace.filter("lowpass",
                                     freq=params.preprocess_lowpass,
                                     zerophase=True,
                                     corners=8)

                        if params.resampling_method == "Resample":
                            logging.debug(
                                "%s.%s Downsample to %.1f Hz" %
                                (station, comp, params.goal_sampling_rate))
                            trace.data = resample(
                                trace.data, params.goal_sampling_rate /
                                trace.stats.sampling_rate, 'sinc_fastest')

                        elif params.resampling_method == "Decimate":
                            decimation_factor = trace.stats.sampling_rate / params.goal_sampling_rate
                            if not int(decimation_factor) == decimation_factor:
                                logging.warning(
                                    "%s.%s CANNOT be decimated by an integer factor, consider using Resample or Lanczos methods"
                                    " Trace sampling rate = %i ; Desired CC sampling rate = %i"
                                    %
                                    (station, comp, trace.stats.sampling_rate,
                                     params.goal_sampling_rate))
                                sys.stdout.flush()
                                sys.exit()
                            logging.debug("%s.%s Decimate by a factor of %i" %
                                          (station, comp, decimation_factor))
                            trace.data = trace.data[::int(decimation_factor)]

                        elif params.resampling_method == "Lanczos":
                            logging.debug(
                                "%s.%s Downsample to %.1f Hz" %
                                (station, comp, params.goal_sampling_rate))
                            trace.data = np.array(trace.data)
                            trace.interpolate(
                                method="lanczos",
                                sampling_rate=params.goal_sampling_rate,
                                a=1.0)

                        trace.stats.sampling_rate = params.goal_sampling_rate

                if get_config(db, 'remove_response', isbool=True):
                    logging.debug('%s Removing instrument response' %
                                  stream[0].id)
                    response_prefilt = eval(get_config(db, 'response_prefilt'))

                    response = responses[responses["channel_id"] ==
                                         stream[0].id]
                    if len(response) > 1:
                        response = response[
                            response["start_date"] < UTCDateTime(gd)]
                        response = response[
                            response["end_date"] > UTCDateTime(gd)]
                    elif len(response) == 0:
                        logging.info("No instrument response information "
                                     "for %s, exiting" % stream[0].id)
                        sys.exit()
                    datalesspz = response["paz"].values[0]
                    stream.simulate(
                        paz_remove=datalesspz,
                        remove_sensitivity=True,
                        pre_filt=response_prefilt,
                        paz_simulate=None,
                    )
                for tr in stream:
                    tr.data = tr.data.astype(np.float32)
                output += stream
                del stream
            del files
    clean_scipy_cache()
    return 0, output
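
Both preprocess variants look up the instrument response for the trace's channel and day before calling stream.simulate(). A standalone sketch of that lookup, assuming a pandas DataFrame with the channel_id / start_date / end_date / paz columns used above, could be:

def pick_response(responses, channel_id, day):
    # keep rows for this channel whose validity window covers the requested day
    r = responses[responses["channel_id"] == channel_id]
    r = r[(r["start_date"] <= day) & (r["end_date"] >= day)]
    if not len(r):
        return None  # caller decides whether to skip the stream or exit
    return r["paz"].values[0]

# e.g. paz = pick_response(responses, stream[0].id, UTCDateTime(gd))
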