def __init__(
        self,
        parent,
        dump_vectors_beyond=20,
        lambda_border=0,
        two_dim_mode='separate',
        reinit=False,
        ):
    Processor.__init__(self, parent, Default_Preprocessor=Cepstrogram,
                       reinit=reinit)
    input_vectors = self.parent_processor.feature_vectors
    input_shape = shape(input_vectors)
    if len(input_shape) > 2:
        # The per-band 'separate' branch is disabled ("if False and ...");
        # higher-dimensional input currently always takes the flattening branch below.
        if False and two_dim_mode == 'separate':
            print "mode:", two_dim_mode
            keep_from_band_variations = 3
            svd_input_vectors = zeros(
                (input_shape[0], input_shape[1] * keep_from_band_variations),
                Float)
            counter = 0
            for spectrum_of_band in parent.feature_vectors:
                print counter
                svd_input_vectors[:, counter:counter + keep_from_band_variations] = \
                    svdeofs.svdeofs(input_vectors)[0][:, :keep_from_band_variations]
                counter += keep_from_band_variations
            input_vectors = svd_input_vectors
        else:
            # Flatten (frames, bands, coefficients) into (frames, bands * coefficients).
            input_vectors = reshape(
                input_vectors,
                (input_shape[0], input_shape[1] * input_shape[2]))
    print shape(input_vectors)
    (z, self.lambdas, self.EOFs) = svdeofs.svdeofs(input_vectors)
    if not dump_vectors_beyond:
        # No fixed cut-off given: derive one from the eigenvalue threshold instead.
        if lambda_border:
            dump_vectors_beyond = argmin(greater(self.lambdas, lambda_border))
    if dump_vectors_beyond:
        self.feature_vectors = z[:, :dump_vectors_beyond]
    else:
        self.feature_vectors = z
def __init__(self, parent, framesize=None, hopsize=100,
             number_of_vectors_used=15):
    if not framesize:
        # Pick a power-of-two frame size from the input length, capped at 1024.
        lp = len(parent)
        #logfs = min(int(scipy.log2(lp / 32)), 10)
        log2 = lambda x: math.log(x) / math.log(2)
        logfs = min(int(log2(lp / 32)), 10)
        framesize = pow(2, logfs)
        print framesize
    if hopsize > framesize / 4:
        hopsize = framesize / 4
    self.hopsize = hopsize
    self.framesize = framesize
    #if len(parent) < framesize:
    #    self._delete_from_parents()
    #    raise UnderflowError, "Thingy is too small...\nHere I pull the plug in order to avoid a segfaulty thingy."
    input_array = parent
    self.feature_vectors = None
    # Treat each of the first few feature dimensions as a signal in its own
    # right: spectrogram of its trajectory, then SVD/EOF truncation per band.
    interesting_parts = input_array[:, :number_of_vectors_used]
    interesting_parts = numarray.transpose(interesting_parts)
    for row in interesting_parts:
        specspectrum = calculate_spectrogram(row, framesize=framesize,
                                             hopsize=hopsize)
        z, lambdas, EOFs = svdeofs.svdeofs(specspectrum)
        print ".",
        s = z[:, :15]
        svd_fft_fft_vectors = numarray.transpose(s)
        #print "svd_eofs ok!"
        if self.feature_vectors is None:
            self.feature_vectors = numarray.transpose(svd_fft_fft_vectors)
        else:
            self.feature_vectors = numarray.concatenate(
                (self.feature_vectors,
                 numarray.transpose(svd_fft_fft_vectors)), 1)
        #print "svd_fft_fft_vectors shapes:", shape(svd_fft_fft_vectors), shape(self.feature_vectors)
    # Final SVD/EOF pass over the concatenated per-band components.
    z, lambdas, EOFs = svdeofs.svdeofs(self.feature_vectors)
    self.feature_vectors = z[:, :15]
    self.arr = self.feature_vectors
def svd_eofs(feature_vectors, number_of_vectors_to_keep=15, show_lambdas=0):
    # Should be using svd_eofs.SVDEOFs here
    z, lambdas, EOFs = svdeofs.svdeofs(feature_vectors)
    if show_lambdas:
        my_show.show(lambdas)
    return (z[:, :number_of_vectors_to_keep], lambdas)
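# Illustrative only (not part of the original module): a minimal sketch of how
# svd_eofs is meant to be called, assuming feature_vectors is a 2D
# (frames x features) array and that numpy is available to fake such an input.
# The output shapes noted below follow the pyclimate-style svdeofs convention
# used throughout this module and are an assumption, not a guarantee.
def _svd_eofs_usage_sketch():
    import numpy
    demo_features = numpy.random.randn(200, 40)   # 200 frames, 40 feature dimensions
    pcs, lambdas = svd_eofs(demo_features, number_of_vectors_to_keep=10)
    print pcs.shape       # (200, 10): truncated principal components, one row per frame
    print lambdas.shape   # (40,): one variance (eigenvalue) per mode, untruncated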
def __init__(
        self,
        parent,
        framesize=None,
        hopsize=100,
        window_function=signal.hanning,
        number_of_vectors_used=15,
        reinit=False,
        ):
    Processor.__init__(
        self,
        parent,
        # Default_Preprocessor = Cepstrogram,
        Default_Preprocessor=Spectrogram,
        reinit=reinit)
    if not framesize:
        # Pick a power-of-two frame size from the parent length, capped at 1024.
        lp = len(self.parent_processor)
        logfs = min(int(log2(lp / 32)), 10)
        framesize = pow(2, logfs)
    if hopsize > framesize / 16:
        hopsize = framesize / 16
    print "hopsize: ", hopsize
    self._set_samplerate(hopsize)
    self.hopsize = hopsize
    self.framesize = framesize
    print "EOF..."
    if len(self.parent_processor) < framesize:
        self._delete_from_parents()
        raise UnderflowError, "Input is too short for framesize %d; aborting to avoid a crash." % framesize
    self.svd_fft_fft(self.parent_processor.feature_vectors,
                     framesize=framesize,
                     hopsize=hopsize,
                     window_function=window_function,
                     number_of_vectors_used=number_of_vectors_used)
    z, lambdas, EOFs = svdeofs.svdeofs(self.feature_vectors)
    self.feature_vectors = z[:, :15]
    #self.feature_vectors, self.lambdas = svd_eofs(self.feature_vectors,
    #                                              number_of_vectors_to_keep=15)
    Segmentations.Frames(
        self,
        len(self.feature_vectors),
        self.parent_processor.samplerate,
        framesize,
        hopsize,
        window_function=window_function,
        )
def __init__(
        self,
        parent,
        framesize=32,
        hopsize=8,
        window_function=signal.hanning,
        number_of_vectors_used=15,
        reinit=False,
        ):
    # NOTE: the window_function argument is overridden here; a Hanning window
    # is always used regardless of what the caller passes in.
    window_function = signal.hanning
    Processor.__init__(
        self,
        parent,
        # Default_Preprocessor = Cepstrogram,
        Default_Preprocessor=Spectrogram,
        reinit=reinit)
    self._set_samplerate(hopsize)
    self.hopsize = hopsize
    self.framesize = framesize
    print "EOF..."
    if len(self.parent_processor) < framesize:
        self._delete_from_parents()
        raise UnderflowError, "Input is too short for framesize %d; aborting to avoid a crash." % framesize
    self.svd_fft_fft(self.parent_processor.feature_vectors,
                     framesize=framesize,
                     hopsize=hopsize,
                     window_function=window_function,
                     number_of_vectors_used=number_of_vectors_used)
    self.test = self.feature_vectors
    z, lambdas, EOFs = svdeofs.svdeofs(self.feature_vectors)
    self.feature_vectors = z[:, :15]
    Segmentations.Frames(
        self,
        len(self.feature_vectors),
        self.parent_processor.samplerate,
        framesize,
        hopsize,
        window_function=window_function,
        )
def ceof_scalar2D(data):
    """
    Estimate the complex EOFs of a 2D array.

    Time should be the first dimension, so that the PCs (principal
    components) are computed along the time dimension.
    """
    assert type(data) is np.ndarray, \
        "ceof_scalar2D requires an ndarray but got: %s" % type(data)
    assert np.isfinite(data).all(), \
        "ceof_scalar2D requires an array with only valid (finite) values"

    # ---- Creating the complex field using the Hilbert transform
    input_H = np.empty(data.shape, dtype=data.dtype)
    for i in range(data.shape[1]):
        input_H[:, i] = scipy.fftpack.hilbert(data[:, i])
    U = data + 1j * input_H

    pcs, lambdas, eofs = svdeofs(U)

    return pcs, lambdas, eofs
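# Illustrative only: a small sketch of ceof_scalar2D on a synthetic travelling
# wave, a common sanity check for complex EOFs because the phase propagation
# ends up concentrated in the leading complex mode. The shapes noted below
# assume the svdeofs convention used above (time first) and k retained modes.
def _ceof_scalar2D_usage_sketch():
    t = np.arange(256, dtype='d')      # time axis
    x = np.arange(16, dtype='d')       # "space" / channel axis
    wave = np.cos(0.2 * t[:, None] - 0.5 * x[None, :])   # (time x channels) travelling wave
    pcs, lambdas, eofs = ceof_scalar2D(wave)
    print np.iscomplexobj(pcs), np.iscomplexobj(eofs)    # both True
    print pcs.shape, eofs.shape                          # e.g. (256, k) and (16, k)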