def makeIR(wav_in, wav_out, fs, duration, noise=0.025):
    """Measure the response of a speaker (+amp+mic) and build an IR."""
    # step 1: full-duplex playback and recording.
    # Input: provided sweep wav file; output: recorded time response
    ecasound_cmd = "ecasound -f:16,1,%i -a:1 -i jack,system,capture " + \
                   " -o /tmp/capture.wav -a:2 -i %s -o jack,system -t %i"
    ecasound_cmd = ecasound_cmd % (int(fs), wav_in, int(duration))
    # run capture
    os.system(ecasound_cmd)
    time.sleep(3)
    # load input and capture wave files
    f = wave.open(wav_in, 'rb')
    len1 = f.getnframes()
    data = f.readframes(len1)
    f.close()
    Y1 = scipy.float32(scipy.fromstring(data, dtype='int16'))
    f = wave.open('/tmp/capture.wav', 'rb')
    len2 = f.getnframes()
    data = f.readframes(len2)
    f.close()
    Y2 = scipy.float32(scipy.fromstring(data, dtype='int16'))
    # truncate and normalize both signals
    # (or we could pad the shortest to the longest... TODO!)
    minlen = min([len1, len2])
    Y2 = Y2[0:minlen]
    Y2 = Y2 / max(abs(Y2))
    Y1 = Y1[0:minlen]
    Y1 = Y1 / max(abs(Y1))
    # compute frequency response function as ratio of both spectra
    FRF = scipy.fft(Y2) / scipy.fft(Y1)
    # compute impulse response as inverse FFT of FRF
    IRraw = scipy.real(scipy.ifft(FRF))
    # get rid of initial lag in IR
    thr = max(abs(IRraw)) * noise
    offset = max([0, min(min(scipy.where(abs(IRraw) > thr))) - 5])
    IR = IRraw[offset:-1]
    IRnorm = IR / max(abs(IR))
    # TODO: add post-processing options such as low/high pass and decay
    # write output IR
    f = wave.open(wav_out, 'w')
    f.setparams((1, 2, fs, 0, 'NONE', 'not compressed'))
    maxVol = 2 ** 15 - 1.0  # maximum amplitude
    wvData = ""
    for i in range(len(IRnorm)):
        wvData += pack('h', maxVol * IRnorm[i])
    f.writeframes(wvData)
    f.close()
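# The snippet above (like most in this collection) decodes raw WAV frames
# with scipy.fromstring, which was deprecated and later removed along with
# NumPy's binary-mode fromstring. A minimal sketch of the equivalent decode
# on a current stack using np.frombuffer; the helper name read_wav_float32
# is hypothetical:
import wave
import numpy as np

def read_wav_float32(path):
    """Decode a 16-bit mono WAV file to a float32 sample array."""
    with wave.open(path, 'rb') as f:
        raw = f.readframes(f.getnframes())
    # frombuffer is the supported replacement for binary-mode fromstring
    return np.frombuffer(raw, dtype=np.int16).astype(np.float32)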
def process(self, sbmp):
    """Core function"""
    if sbmp is None:
        # Oversize required
        return self.size
    # reverse height and width under advice
    (ws, hs, ps) = shape = (sbmp.GetHeight(), sbmp.GetWidth(), 3)
    simg = wx.ImageFromBitmap(sbmp)
    sarray = scipy.array(scipy.fromstring(simg.GetData(), 'uint8'),
                         self.dtype) / 255.0
    sarray = scipy.rollaxis(scipy.reshape(sarray, shape), 2)
    self.shape = sarray.shape
    tarray = (self.withGPU if self.gpgpu else self.withCPU)(sarray)
    mm = (sarray.min(), sarray.max(), tarray.min(), tarray.max())
    print type(sarray[0, 0, 0]), type(tarray[0, 0, 0]), mm
    tarray = numpy.nan_to_num(tarray)
    tarray /= max(tarray.max(), self.coefficient)
    tarray = scipy.array((tarray * 255.0).tolist(), 'uint8')
    tarray = scipy.dstack(tarray)
    timg = wx.EmptyImage(ws, hs)
    timg.SetData(tarray.tostring())
    self.tbmp = timg.ConvertToBitmap()
    return self.tbmp
def _read_nicolet(self, tmin, tmax):
    """Load Nicolet BMSI data."""
    if tmin < 0:
        tmin = 0
    BYTES_PER_SAMPLE = self.channels * 2
    indmin = int(self.freq * tmin)
    NUMSAMPLES = os.path.getsize(self.fullpath) // BYTES_PER_SAMPLE
    indmax = min(NUMSAMPLES, int(self.freq * tmax))
    byte0 = indmin * BYTES_PER_SAMPLE
    numbytes = (indmax - indmin) * BYTES_PER_SAMPLE

    self.fh.seek(byte0)
    data = fromstring(self.fh.read(numbytes), "h")
    if sys.byteorder == "big":
        data = data.byteswapped()
    data = data.astype("d")
    data.shape = -1, self.channels

    if self.scale is not None:
        data = self.scale * data

    t = (1 / self.freq) * arange(indmin, indmax)
    return t, data
def readBlobs(self, stream):
    length = len(stream)
    blobs = []
    while stream.tell() < length:
        blobType = stream.readUInt32()
        blobLength = stream.readUInt32()
        if 0x0c == blobType:
            zeros = stream.read(0x10)
            if '\x00' * 0x10 != zeros:
                raise Exception("Blob type 0xc decoding error")
            supposedXor = stream.readUInt16()
            twoZeros = stream.read(2)
            if '\x00\x00' != twoZeros:
                raise Exception("Blob type 0xc decoding error")
            dataLength = stream.readUInt32()
            zero = stream.readUInt32()
            if 0 != zero:
                raise Exception("Blob type 0xc decoding error")
            somethingImportant = stream.readUInt32()
            blobData = stream.read(dataLength)
            dataArray = scipy.fromstring(blobData, scipy.uint16)
            calcedXor = scipy.bitwise_xor.reduce(dataArray)
            if calcedXor != supposedXor:
                raise Exception("Wrong XOR check in data blob %x != %x" %
                                (calcedXor, supposedXor))
            blobs.append((blobType, (blobData, somethingImportant)))
        else:
            blobData = stream.read(blobLength - 8)
            blobs.append((blobType, blobData))
    return blobs
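# The 0x0c blob check above folds the payload's 16-bit words with XOR.
# A minimal standalone sketch of the same checksum on modern NumPy,
# assuming a little-endian, even-length payload (xor16 is hypothetical):
import numpy as np

def xor16(payload):
    """Fold a byte string into a single 16-bit XOR checksum."""
    words = np.frombuffer(payload, dtype=np.uint16)
    return int(np.bitwise_xor.reduce(words))

assert xor16(b'\x01\x00\x02\x00') == 0x0003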
def readData(self, ob):
    f = open(ob.filename, 'rb')
    f.seek(int(self._header['STM image list']['Data offset']))
    data = f.read(int(self._header['STM image list']['Data length']))
    f.close()
    ob.d = scipy.fromstring(data, dtype=scipy.int16)
    ob.d.shape = ob.XRes, ob.YRes
    ob.d = scipy.flipud(ob.d)
def _read_float_array(self, fname):
    """Load an array of C floats."""
    fh = open(fname, "rb")
    data = fromstring(fh.read(), "f")
    fh.close()
    data.shape = -1, self.channels
    return data
def encrypt(self, data, base=0x1000084):
    if isinstance(data, (ObjectWithStreamBigEndian, ObjectWithStream)):
        data = data.getRawData()
    data = scipy.fromstring(data, scipy.uint16)
    data.byteswap(True)
    data = self.encryptChunk(data, base=base)
    data.byteswap(True)
    return data
def decrypt(self, data, base=0x1000084):
    if isinstance(data, (ObjectWithStreamBigEndian, ObjectWithStream)):
        data = data.getRawData()
    data = scipy.fromstring(data, scipy.uint16)
    data.byteswap(True)
    data = self.decryptChunk(data, base)
    data.byteswap(True)
    data = scipy.bitwise_xor(data, data[-1])
    return data.tostring()
def callback(in_data, frame_count, time_info, status):
    # data = q.get()
    # read the next chunk from the wave file and run it through the IIR filter
    d = self.wf.readframes(frame_count)
    print frame_count
    buf = scipy.fromstring(d, scipy.int16)
    d2 = scipy.signal.lfilter(IIR_b, IIR_a, buf)
    data = scipy.int16(d2).tostring()
    return (data, pyaudio.paContinue)
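# Filtering each chunk independently resets the IIR state at every callback,
# which can produce clicks at buffer boundaries. A sketch that carries the
# filter state across calls via lfilter's zi argument (the coefficient names
# IIR_b/IIR_a are assumed from the snippet above):
import scipy.signal

zi = scipy.signal.lfilter_zi(IIR_b, IIR_a) * 0.0  # initial filter state

def filter_chunk(buf):
    """Filter one int16 chunk, preserving IIR state between chunks."""
    global zi
    out, zi = scipy.signal.lfilter(IIR_b, IIR_a, buf, zi=zi)
    return out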
def mainLoop(self):
    global terminated, switch
    # start recording
    audio = pyaudio.PyAudio()
    stream = audio.open(format=FORMAT, channels=CHANNELS, rate=FS,
                        input=True, frames_per_buffer=CHUNK)
    # Clean the file; after a force quit it keeps the last run's data
    open('out.bin', 'w').close()
    last_switch = 0
    sound = []
    while not terminated:
        if recording:
            # get audio samples
            data = stream.read(CHUNK)
            orig = fromstring(data, dtype="int16")
            sound = np.append(sound, orig)
            # Transform to frequency domain (FFT),
            # reduce noise and transform back to time
            originalfft = np.array(fft(orig))
            fftSpec = abs(originalfft) / (CHUNK / 2)
            fftSpec = fftSpec[:int(CHUNK / 2)]
            xf = 1.0 * np.arange(0, FS / 2., FS / (1. * CHUNK))
            # Spectrogram
            self.img_array = np.roll(self.img_array, -1, 0)
            self.img_array[-1:] = 10.0 * np.log10(fftSpec)
            # Plotting graphs
            self.origWave.plot(orig, clear=True)
            self.fftItem.plot(xf, fftSpec, clear=True)
            self.specItem.setImage(self.img_array, autoLevels=False)
        else:
            if last_switch != switch:
                last_switch = switch
                frames = []
                # Write samples to the file (text mode, one run per line)
                f = open('out.bin', 'a')
                for i in range(len(sound)):
                    f.write('%d ' % sound[i])
                f.write('\n')
                f.close()
                sound = []
        QtGui.QApplication.processEvents()
    stream.stop_stream()
    stream.close()
    audio.terminate()
def readData(self, ob):
    f = open(ob.filename, 'rb')
    f.seek(int(self._header['Data offset']))
    points = int(self._header['SamplesT'])
    datalength = ob.XRes * ob.YRes * points * 2
    data = f.read(datalength)
    ob.d = scipy.fromstring(data, dtype=scipy.int16)
    ob.d.shape = ob.XRes, ob.YRes, points
    ob.d = scipy.flipud(ob.d)
def readWaveAsFloat(wf):
    length = wf.getnframes()
    data = wf.readframes(length)
    data = sp.fromstring(data, sp.int16)
    # normalize 16-bit samples to the range [-1, 1)
    data = np.asarray(data, dtype="float64")
    data = data / 32768.
    print(data.shape)
    return data
def read_signal(filename, winsize):
    wf = wave.open(filename, 'rb')
    n = wf.getnframes()
    data = wf.readframes(n)
    params = (wf.getnchannels(), wf.getsampwidth(), wf.getframerate(),
              wf.getnframes(), wf.getcomptype(), wf.getcompname())
    # pad the signal out to a whole number of windows
    siglen = (int(len(data) / 2 / winsize) + 1) * winsize
    signal = sp.zeros(siglen, sp.int16)
    signal[0:len(data) // 2] = sp.fromstring(data, sp.int16)
    return [signal, params]
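# Usage sketch for read_signal, assuming a 16-bit mono WAV and a
# 512-sample analysis window (the file name is a placeholder):
signal, params = read_signal('input.wav', 512)
nchannels, sampwidth, framerate = params[0], params[1], params[2]
print(len(signal), framerate)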
def open(self, wavefile):
    self.wavefile = wave.open(wavefile, 'rb')
    frames = self.wavefile.readframes(self.wavefile.getnframes())
    # normalize to the range -1..+1
    self.wavedata = sp.fromstring(frames, dtype='int16') / 32768.0
    self.wavefile.rewind()
    self.proc = subprocess.Popen(['aplay', '-q', wavefile])
    # pause playback until start() to avoid an offset at playback start
    self.proc.send_signal(signal.SIGSTOP)
    self.start_time = time.time()  # playback start time
    self.stop_time = time.time()   # time at which playback stopped
    self.spend_time = 0.0          # accumulated paused time
def __init__(self, s):
    self.data = fromstring(s[:18000], UInt8)
    self.data.shape = -1, 18
    self.timestamp = unpack('8s', s[18000:18008])[0].strip()    # char [8]
    self.rec_no = unpack('L', s[18008:18012])[0]                # unsigned long
    self.ux_time = unpack('L', s[18012:18016])[0]               # unsigned long
    self.smimage = unpack('8s', s[18016:18024])[0].strip()      # char [8]
    self.ox_rec_ptr = unpack('B', s[18024])[0]                  # unsigned char
    self.oxes = unpack('64B', s[18025:18089])                   # unsigned char [64]
    self.rates = unpack('64B', s[18089:18153])                  # unsigned char [64]
    self.ox_acq_time = unpack('64H', s[18153:18281])            # unsigned short [64]
    self.filstruct = unpack('150s', s[18281:18431])[0].strip()  # char [150]
def read(fname, winsize):
    if fname == "-":
        wf = wave.open(sys.stdin, 'rb')
        n = wf.getnframes()
        data = wf.readframes(n)
        params = (wf.getnchannels(), wf.getsampwidth(), wf.getframerate(),
                  wf.getnframes(), wf.getcomptype(), wf.getcompname())
        siglen = (int(len(data) / 2 / winsize) + 1) * winsize
        signal = sp.zeros(siglen, sp.float32)
        signal[0:len(data) // 2] = sp.float32(sp.fromstring(data, sp.int16)) / 32767.0
        return signal, params
    else:
        return read_signal(fname, winsize)
def load(filename, network=None):
    r"""
    Opens a 'csv' file, reads in the data, and adds it to the **Network**

    Parameters
    ----------
    filename : string
        The name of the file containing the data to import. The formatting
        of this file is outlined below.

    Returns
    -------
    If no Network object is supplied then one will be created and returned.

    """
    net = {}
    with _read_file(filename=filename, ext='csv') as f:
        a = _pd.read_table(filepath_or_buffer=f,
                           sep=',',
                           skipinitialspace=True,
                           index_col=False,
                           true_values=['T', 't', 'True', 'true', 'TRUE'],
                           false_values=['F', 'f', 'False', 'false', 'FALSE'])

    # Now parse through all the other items
    for item in a.keys():
        element = item.split('.')[0]
        prop = item.split('.', maxsplit=1)[1]
        data = _sp.array(a[item].dropna())
        if type(data[0]) is str:
            N = _sp.shape(data)[0]
            if '.' in data[0].split(' ')[0]:  # Decimal means float
                dtype = float
            else:
                dtype = int
            temp = _sp.empty(_sp.shape(data), dtype=object)
            for row in range(N):
                temp[row] = _sp.fromstring(data[row], sep=' ', dtype=dtype)
            data = _sp.vstack(temp)
        else:
            dtype = type(data[0])
        net[element+'.'+prop] = data.astype(dtype)

    if network is None:
        network = OpenPNM.Network.GenericNetwork()
    network = _update_network(network=network, net=net)
    return network
def spectrumAnalyzer(self):
    # initialize the signal buffer for the FFT
    signal = zeros(fftLen, dtype=float)

    # Update
    print('audio input loop')
    sound_data = []
    # run for a few seconds, then stop
    for n in xrange(0, self.fs * 7 / self.chunk):
        try:
            # data is a byte string
            data = self.stream.read(self.chunk)
        except IOError as ex:
            # not sure why, but this fails fairly often after running a while
            if ex[1] != pyaudio.paInputOverflowed:
                raise
            data = '\x00' * self.chunk
            print('input overflow')
        num_data = fromstring(data, dtype='int16')
        signal = roll(signal, - self.chunk)
        signal[- len(num_data):] = num_data
        fftspec = my_fft(signal)
        spec = abs(fftspec[1: fftLen / 2 + 1]) * signal_scale  # spectrum

        if max(spec) >= 2000 and np.argmax(spec) > 3:
            print('spec: %s' % str(max(spec)))
            max_list_num = np.argmax(spec)
            print('Max : %8.3f , %s, %s' % (
                self.freq_list[max_list_num][0],
                self.my_piano.octa_mark[self.freq_list[max_list_num][1]],
                self.my_piano.onkai[self.freq_list[max_list_num][2]])
            )
            sound_data.append(phone_scale[self.freq_list[max_list_num][2]])
        else:
            print('none')
            sound_data.append('N')

        self.specItem.plot(spec, clear=True)
        QtGui.QApplication.processEvents()

    self.save_score_data(sound_data, self.chunk / float(self.fs),
                         OUTPUT_FILE_NAME)
    self.stream.close()
    self.p.terminate()
def img_from_fig(fig):
    """produce :Image: instance from :fig:

    :type fig: matplotlib.figure.Figure
    :param fig: input figure
    :rtype: Image.Image
    """
    if not isinstance(fig, Figure):
        raise TypeError('fig must be a %s' % Figure)
    if fig.canvas is None:
        cvs = FigureCanvasAgg(fig)
    fig.canvas.draw()
    rgb = fig.canvas.tostring_rgb()
    rgb = sp.fromstring(rgb, dtype=sp.uint8)
    rgb.shape = map(int, fig.bbox.bounds[2:]) + [3]
    return MRPlot.img_from_rgb(rgb)
def writeBlobs(self, outputStream, blobs, address, plain):
    for blobType, blobData in blobs:
        outputStream.writeUInt32(blobType)
        if 0x0c == blobType:
            codeData, somethingImportant = blobData
            outputStream.writeUInt32(len(codeData) + 0x20 + 8)
            outputStream.write('\x00' * 0x10)
            dataArray = scipy.fromstring(codeData, scipy.uint16)
            calcedXor = scipy.bitwise_xor.reduce(dataArray)
            outputStream.writeUInt16(calcedXor)
            outputStream.writeUInt16(0)
            outputStream.writeUInt32(len(codeData))
            outputStream.writeUInt32(0)
            outputStream.writeUInt32(somethingImportant)
            outputStream.write(codeData)
        else:
            outputStream.writeUInt32(len(blobData) + 8)
            outputStream.write(blobData)
def from_data(data):
    """produce a SimPkg from bytedata

    :Parameters:
        data : str
            The data to produce the package from.
    """
    # length check
    if len(data) < SimPkg.HLEN:
        raise ValueError('length < SimPkg.HLEN')

    # read header
    idx = SimPkg.HLEN
    tid, ident, frame, nitems = unpack(SimPkg.HDEF, data[:idx])
    cont = []

    # content loop
    while idx < len(data):
        # read content header
        dim0, dim1, nbytes, dtype_str = unpack(
            ContentItem.HDEF, data[idx:idx + ContentItem.HLEN])
        idx += ContentItem.HLEN

        # read content data
        cont_item = N.fromstring(
            data[idx:idx + nbytes],
            dtype=N.dtype(dtype_str)
        )
        if dim0 >= 0:
            if dim1 >= 0:
                dim = [dim0, dim1]
            else:
                dim = [dim0]
            cont_item.shape = dim
        cont.append(cont_item)
        idx += nbytes

    # return
    assert len(cont) == nitems, \
        'cont list length (%s) does not match nitems (%s)!' % (len(cont), nitems)
    return SimPkg(tid, ident, frame, tuple(cont))
def _loadEGM96():
    """load the EGM96 geoid model into a spline object"""
    # load the data resource file into a string
    flc = resource_string(__name__, "data/egm96.dac")
    # set up basic coordinates
    lon = sp.linspace(0, 2 * sp.pi, 1440, False)
    lat = sp.linspace(0, sp.pi, 721)
    # parse the raw data string
    data = sp.fromstring(flc, sp.dtype(sp.int16).newbyteorder("B"),
                         1038240).reshape((lat.size, lon.size)) / 100.0
    # interpolate the data
    lut = RectSphereBivariateSpline(lat[1:-1], lon, data[1:-1],
                                    pole_values=(sp.mean(data[1]),
                                                 sp.mean(data[-1])))
    return lut
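# The geoid grid above is stored as big-endian 16-bit integers
# (1038240 = 721 x 1440 grid points). A minimal decode sketch with
# np.frombuffer on modern NumPy (parse_egm96 is hypothetical):
import numpy as np

def parse_egm96(raw):
    """Decode the 721x1440 big-endian int16 geoid grid to metres."""
    grid = np.frombuffer(raw, dtype=np.dtype(np.int16).newbyteorder("B"),
                         count=721 * 1440)
    return grid.reshape(721, 1440) / 100.0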
def threader():
    for i in range(100):
        # timing measurement
        start_b = time.time()
        # read the next audio chunk (byte string)
        d = self.wf.readframes(BUFFER_SIZE)
        # convert the byte string to int16 samples
        buf = scipy.fromstring(d, scipy.int16)
        # filter
        data = scipy.signal.lfilter(IIR_b, IIR_a, buf)
        # convert back to a byte string
        self.buffer = scipy.int16(data).tostring()
        q.put(self.buffer)
        # performance
        elapsed_time = time.time() - start_b
        print("buffer_time:{0}".format(elapsed_time))
def _initialize_file(self, filename, **kwargs):
    # open file
    self.fp = open(filename, 'r')

    # read header dbconfig
    self.header = _ATF_H(self.fp)
    self.nchan = len(self.header.signals_exported)
    self.ndata = (self.header.datasets[1] - 1) / self.nchan

    # read data
    data = sp.fromstring(self.fp.read(), dtype=self.dtype, sep='\t')
    data.shape = (
        data.shape[0] // self.header.datasets[1],
        self.header.datasets[1]
    )
    data = data.T
    self._sample_times = data[0, :]
    self._data = data[1:, :]
    del data
def cut_wav(filename, time):
    # `time` is in seconds
    # open the input file
    wavf = filename
    wr = wave.open(wavf, 'r')

    # collect the wave file's properties
    ch = wr.getnchannels()
    width = wr.getsampwidth()
    fr = wr.getframerate()
    fn = wr.getnframes()
    total_time = 1.0 * fn / fr
    integer = math.floor(total_time)  # truncate fractional seconds
    t = int(time)                     # slice length [sec]
    frames = int(ch * fr * t)
    num_cut = int(integer // t)

    data = wr.readframes(wr.getnframes())
    wr.close()
    X = fromstring(data, dtype=int16)

    for i in range(num_cut):
        # generate the output segment
        outf = '001_bunkatsu/' + str(i) + '.wav'
        start_cut = i * frames
        end_cut = i * frames + frames
        Y = X[start_cut:end_cut]
        outd = struct.pack("h" * len(Y), *Y)

        # write it out
        ww = wave.open(outf, 'w')
        ww.setnchannels(ch)
        ww.setsampwidth(width)
        ww.setframerate(fr)
        ww.writeframes(outd)
        ww.close()

    return num_cut
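# Usage sketch for cut_wav, assuming the 001_bunkatsu/ output directory
# already exists (the function does not create it); the input file name
# is a placeholder:
num_segments = cut_wav('recording.wav', 5)  # 5-second slices
print('%d segments written' % num_segments)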
def importWave(self):
    """Wave file to ndarray"""
    wf = wave.open(self.filename, 'rb')
    waveframes = wf.readframes(wf.getnframes())
    self.framerate = wf.getframerate()
    data = sp.fromstring(waveframes, sp.int16)
    self.duration = float(wf.getnframes()) / self.framerate
    if wf.getnchannels() == 2:
        # deinterleave and mix stereo down to mono (average in int32
        # to avoid overflow before casting back to int16)
        left = sp.array([data[i] for i in range(0, data.size, 2)])
        right = sp.array([data[i] for i in range(1, data.size, 2)])
        left = sp.int32(left)
        right = sp.int32(right)
        data = sp.int16((left + right) / 2)
    if self.fs is None:
        self.fs = self.framerate
    else:
        # data = self.resample(data, data.size*(self.fs/self.framerate))
        data = ssig.decimate(data, int(self.framerate / self.fs))
    self.duration_list = sp.arange(0, self.duration, 1. / self.fs)
    data = ssig.detrend(data)
    return data
def get_data(sound):
    buffer = sound.readframes(sound.getnframes())
    buffer = fromstring(buffer, dtype=int16)
    data_a = []
    data_b = []
    channels = sound.getnchannels()
    print(len(buffer))
    for i in buffer:
        # offset to unsigned 16-bit, then split each byte into nibbles
        tmp = 32768 + i
        a1 = tmp // 256
        a2 = a1
        a1 = a1 // 16
        a2 = a2 % 16
        b1 = tmp % 256
        b2 = b1
        b1 = b1 // 16
        b2 = b2 % 16
        data_a.append(a1 + b1 * 16)
        data_b.append(a2 + b2 * 16)
    data = [data_a, data_b, channels]
    return data
def __init__(self, fp):
    """
    :type fp: file
    :param fp: open file at seek(0)
    """
    # version
    self.version = fp.readline().strip('\'\"\r\n').split()
    if self.version != ['ATF', '1.0']:
        raise DataFileError('wrong version: %s' % self.version)

    # data set structure
    self.datasets = fp.readline().strip('\'\"\r\n').split()
    self.datasets = map(int, self.datasets)
    if len(self.datasets) != 2:
        raise DataFileError('invalid file structure: %s' % str(self.datasets))
    self.signals_exported = None
    self.sweep_times = None
    self.dbconfig = {}

    # signal names
    for _ in xrange(self.datasets[0]):
        line = fp.readline().strip('\'\"\r\n')
        if line.startswith('SignalsExported'):
            self.signals_exported = line.split('=')[-1].split(',')
        elif line.startswith('SweepStartTimesMS'):
            self.sweep_times = sp.fromstring(line.split('=')[1], sep=',')
        else:
            # TODO: if we need other header infos, read them in here
            pass
    if self.signals_exported is None:
        raise DataFileError('could not get signal count and names!')

    # column headers
    self.col_headers = fp.readline().strip('\r\n').split('\t')[1:]
    self.col_headers = \
        map(str.strip, self.col_headers, ['\'\"'] * len(self.col_headers))
def get_data(sound):
    buffer = sound.readframes(sound.getnframes())
    buffer = fromstring(buffer, dtype=int16)
    data_a = []
    data_b = []
    channels = sound.getnchannels()
    print(len(buffer))
    for i in buffer:
        # offset to unsigned 16-bit, then deal alternating bits
        # (LSB first) into two 8-bit streams
        a_0b = ""
        b_0b = ""
        tmp = 32768 + i
        for j in range(16):
            if j % 2 == 0:
                a_0b += str(tmp & 0b1)
                tmp >>= 1
            else:
                b_0b += str(tmp & 0b1)
                tmp >>= 1
        data_a.append(int(a_0b, 2))
        data_b.append(int(b_0b, 2))
    data = [data_a, data_b, channels]
    return data
def WavRead(FNAME, dtype=np.float64):
    '''
    Load WAVE format file
    <<Input>>
    FNAME ... file name
    dtype ... data type
    <<Output>>
    x     ... waveform
    '''
    ### File open ###
    try:
        wf = wave.open(FNAME, 'rb')
    except:
        print "FILE I/O error!"
        sys.exit()
    ### Load all data ###
    data = wf.readframes(wf.getnframes())
    x = sp.fromstring(data, sp.int16).astype(dtype)
    wf.close()
    return x
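# Usage sketch (the file name is a placeholder); with the dtype argument
# applied as documented, the default result is float64 samples:
x = WavRead('voice.wav')
print x.shape, x.dtype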
def load(cls, filename, network=None):
    r"""
    """
    net = {}
    # ---------------------------------------------------------------------
    # Parse the link1 file
    filename = cls._parse_filename(filename=filename, ext='am')
    with open(filename, mode='r') as f:
        Np = None
        Nt = None
        while (Np is None) or (Nt is None):
            s = f.readline()[:-1].split(' ')
            if s[0] == 'define':
                if s[1] == 'VERTEX':
                    Np = int(s[2])
                if s[1] == 'EDGE':
                    Nt = int(s[2])
        net = {}
        propmap = {}
        typemap = {}
        shapemap = {}
        while True:
            s = f.readline()[:-1].split(' ')
            if s[0] == 'VERTEX':
                dshape = [Np]
                if s[2].endswith(']'):
                    ncols = int(s[2].split('[', 1)[1].split(']')[0])
                    dshape.append(ncols)
                dtype = s[2].split('[')[0]
                temp = sp.zeros(dshape, dtype=dtype)
                net['pore.'+s[3]] = temp
                key = int(s[-1].replace('@', ''))
                propmap[key] = 'pore.'+s[3]
                typemap[key] = dtype
                shapemap[key] = dshape
            elif s[0] == 'EDGE':
                dshape = [Nt]
                if s[2].endswith(']'):
                    ncols = int(s[2].split('[', 1)[1].split(']')[0])
                    dshape.append(ncols)
                dtype = s[2].split('[')[0]
                temp = sp.zeros(dshape, dtype=dtype)
                net['throat.'+s[3]] = temp
                key = int(s[-1].replace('@', ''))
                propmap[key] = 'throat.'+s[3]
                typemap[key] = dtype
                shapemap[key] = dshape
            elif s[0] == '#':
                break
        s = f.read().split('@')
        for key in propmap.keys():
            if key in s:
                data = s[key].split('\n')[1:]
                data = ' '.join(data)
                arr = sp.fromstring(data, dtype=typemap[key], sep=' ')
                arr = sp.reshape(arr, newshape=shapemap[key])
                net[propmap[key]] = arr
        # End file parsing
    net['pore.coords'] = net['pore.VertexCoordinates']
    net['throat.conns'] = sp.sort(net['throat.EdgeConnectivity'], axis=1)
    if network is None:
        network = GenericNetwork()
    network = cls._update_network(network=network, net=net)
    return network.project
import pyaudio
from scipy import fromstring, int16
from matplotlib import pyplot as pl

CHUNK = 1024
p = pyaudio.PyAudio()
input_device_index = 0
stream = p.open(format=pyaudio.paInt16,
                channels=1,
                rate=44100,
                frames_per_buffer=CHUNK,
                input=True)

i = 0
while stream.is_active():
    try:
        input = stream.read(CHUNK)
        num_data = fromstring(input, dtype="int16") / 32768.0
        #print(num_data)
        #print('max value: ' + str(num_data.max()))
        if num_data.max() >= 0.25:
            print('BIG!!!!!!!!' + str(i))
            i += 1
        #pl.plot(num_data)
        #pl.draw()
        #pl.pause(0.01)
        #pl.cla()
    except KeyboardInterrupt:
        #pl.close()
        break
def exec_wav_spectrum():
    # virtual COM port, so the baud rate is meaningless
    ser = serial.Serial('COM52', 1000000)

    hamming_win = sp.hamming(FFT_SIZE)

    wavefile = wave.open('mtank.wav', 'rb')
    frames = wavefile.readframes(wavefile.getnframes())
    # normalize to the range +/-1
    wavedata = sp.fromstring(frames, dtype='int16') / 32768.0
    wavefile.rewind()

    pa = pyaudio.PyAudio()
    print('WAV: [OK]')

    count = 0
    gain = 1.00
    fps = 60.0

    def callback(in_data, frame_count, time_info, status):
        frames = wavefile.readframes(frame_count)
        wav = sp.fromstring(frames, dtype='int16')
        # apply the gain to the playback volume as well
        gained_wave = wav * gain
        gained_wave = np.clip(gained_wave, -32768, +32767)
        data = bytes(gained_wave.astype(np.int16))
        return (data, pyaudio.paContinue)

    stream = pa.open(format=pa.get_format_from_width(wavefile.getsampwidth()),
                     channels=wavefile.getnchannels(),
                     rate=wavefile.getframerate(),
                     output=True,
                     stream_callback=callback)
    stream.start_stream()

    start_time = time.time()
    while stream.is_active():
        # check for a mode switch
        ser.write('md\n'.encode())
        line = ser.readline()
        if int(line) == 1:
            print('Mode Switch!')
            break

        # adjust the gain every tenth frame
        if (count % 10) == 0:
            gain = read_gain(ser)
            print('gain=%.2f' % gain)

        # locate the current frame from the elapsed time and FFT it
        # data[1024] (normalized) --> spec[24]
        frame_time = time.time() - start_time
        frame_pos = int(frame_time * wavefile.getframerate())
        fft_input = wavedata[frame_pos:frame_pos + FFT_SIZE]
        # apply gain
        fft_input = fft_input * gain
        if len(fft_input) < FFT_SIZE:
            fft_input = np.zeros(FFT_SIZE)
        fft_output = sp.fft(fft_input * hamming_win)

        ########################################
        # collapse the spectrum into frequency bands
        y = []
        toStep = 0
        fromStep = 0
        specSize = 513
        for i in range(len(bandHz)):
            bandStep = bandHz[i]
            toStep += bandStep
            if toStep > specSize:
                toStep = specSize
            bandAve = 0.0
            j = fromStep
            while j < toStep:
                bandDB = 0.0
                if abs(fft_output[j]) >= 0.001:
                    bandDB = 20 * math.log10(abs(fft_output[j]))
                if bandDB < 0:
                    bandDB = 0
                bandAve += bandDB
                j += 1
            # band average
            bandAve /= bandStep
            fromStep = toStep
            # final scaling
            bandAve /= 1.5
            y.append(int(bandAve))
        ########################################
        spec = y

        panel = spec_to_panel(spec)
        # show the update count
        panel[0][23] = num_to_pattern[count // 1 % 10]
        panel[0][22] = num_to_pattern[count // 10 % 10]
        panel[0][21] = num_to_pattern[count // 100 % 10]
        panel[0][20] = num_to_pattern[count // 1000 % 10]
        xfer_data = panel_to_command(panel, 0x01)
        write_display(ser, xfer_data)

        # wait until the scheduled time for the configured fps
        expected_time = start_time + (((1000.0 / fps) * count) / 1000)
        while time.time() < expected_time:
            time.sleep(0.001)
        print('_', end='')
        count += 1

    elapsed_time = time.time() - start_time
    ser.close()
    stream.stop_stream()
    stream.close()
    wavefile.close()
    pa.terminate()
    if elapsed_time > 0:
        print('%f [s]' % elapsed_time)
        print('fps = %f' % (count / elapsed_time))
    print('Done.')
def train_loop(model, optimizer, train_set, scheduler=None):
    num_mb = len(train_set) // hp.batch_size
    if scheduler:
        scheduler.step(epoch)
    for i in range(num_mb):
        # input lmfb (B x T x (F x frame_stacking))
        xs = []
        # target symbols
        ts = []
        # onehot vectors of target symbols (B x L x NUM_CLASSES)
        ts_onehot = []
        # target vectors for label smoothing (B x L x NUM_CLASSES)
        ts_onehot_LS = []
        # emotion labels and their onehot variants
        emo = []
        emo_onehot = []
        emo_onehot_LS = []
        # input and target lengths
        lengths = []
        ts_lengths = []
        temp = []
        temp_length = []
        for j in range(hp.batch_size):
            s = train_set[i * hp.batch_size + j].strip()
            if hp.ASR:
                x_file, laborg = s.split(' ', 1)
            elif hp.dist:
                x_file, laborg, labemo, labdist = s.split('\t')
                laborg = laborg.strip()
                labemo = labemo.strip()
                labdist = labdist.strip()
            else:
                x_file, laborg, labemo = s.split('\t')
                laborg = laborg.strip()
                labemo = labemo.strip()
            if '.htk' in x_file:
                cpudat = load_dat(x_file)
                cpudat = cpudat[:, :hp.lmfb_dim]
            elif '.npy' in x_file:
                cpudat = np.load(x_file)
            elif '.wav' in x_file:
                with wave.open(x_file) as wf:
                    dat = wf.readframes(wf.getnframes())
                y = fromstring(dat, dtype=int16)[:, np.newaxis]
                y_float = y.astype(np.float32)
                # normalize the raw waveform to zero mean, unit variance
                cpudat = (y_float - np.mean(y_float)) / np.std(y_float)
            tmp = copy.deepcopy(cpudat)
            print("{} {}".format(x_file, cpudat.shape[0]))
            if hp.frame_stacking > 1 and hp.encoder_type != 'Wave':
                cpudat, newlen = frame_stacking(cpudat, hp.frame_stacking)
            newlen = cpudat.shape[0]
            if hp.encoder_type == 'CNN':
                cpudat_split = np.split(cpudat, 3, axis=1)
                cpudat = np.hstack((cpudat_split[0].reshape(newlen, 1, 80),
                                    cpudat_split[1].reshape(newlen, 1, 80),
                                    cpudat_split[2].reshape(newlen, 1, 80)))
                newlen = cpudat.shape[0]
            lengths.append(newlen)
            xs.append(cpudat)
            temp.append(tmp)
            temp_length.append(tmp.shape[0])
            cpulab = np.array([int(c) for c in laborg.split(' ')],
                              dtype=np.int32)
            cpulab_onehot = onehot(cpulab, hp.num_classes)
            ts.append(cpulab)
            ts_lengths.append(len(cpulab))
            ts_onehot.append(cpulab_onehot)
            ts_onehot_LS.append(0.9 * cpulab_onehot +
                                0.1 * 1.0 / hp.num_classes)
            if hp.dist and hp.ASR == False:
                cpuemo = np.array([int(x) for x in labemo], dtype=np.int32)
                emotion_onehot = onehot_dist(labdist, hp.num_emotion)
                emo_onehot.append(emotion_onehot)
                emo_onehot_LS.append(0.9 * emotion_onehot +
                                     0.1 * 1.0 / hp.num_emotion)
                emo.append(cpuemo)
            elif hp.ASR == False:
                cpuemo = np.array([int(x) for x in labemo], dtype=np.int32)
                emotion_onehot = onehot(cpuemo, hp.num_emotion)
                emo_onehot.append(emotion_onehot)
                emo_onehot_LS.append(0.9 * emotion_onehot +
                                     0.1 * 1.0 / hp.num_emotion)
                emo.append(cpuemo)
        if hp.baseline_type != 'lim_BLSTM':
            temp, temp_length = xs, lengths

        if hp.ASR:
            xs, lengths, ts, ts_onehot, ts_onehot_LS, ts_lengths = sort_pad(
                hp.batch_size, xs, lengths, ts, ts_onehot, ts_onehot_LS,
                ts_lengths)
            youtput_in_Variable = model(xs, lengths, ts_onehot, [], [])
            loss = 0.0
            if hp.decoder_type == 'Attention':
                for k in range(hp.batch_size):
                    num_labels = ts_lengths[k]
                    loss += label_smoothing_loss(
                        youtput_in_Variable[k][:num_labels],
                        ts_onehot_LS[k][:num_labels], 1) / num_labels
                print('loss = {}'.format(loss.item()))
        elif hp.baseline:
            xs, lengths, ts, ts_onehot, ts_onehot_LS, ts_lengths, emo, \
                emo_onehot, emo_onehot_LS, temp = sort_pad(
                    hp.batch_size, xs, lengths, ts, ts_onehot, ts_onehot_LS,
                    ts_lengths, emo, emo_onehot, emo_onehot_LS, temp,
                    temp_length)
            if hp.baseline_type == 'CNN_BLSTM' or hp.baseline_type == 'lim_BLSTM':
                # crop or zero-pad every utterance to 750 frames
                onehot_length = temp.size(2)
                xs_new = torch.zeros((hp.batch_size, 750, onehot_length))
                for i in range(hp.batch_size):
                    feature_length = temp.size(1)
                    if feature_length > 750:
                        xs_new.data[:, :750, :] = temp.data[:, :750, :]
                    else:
                        xs_new.data[:, :feature_length, :] = \
                            temp.data[:, :feature_length, :]
                emotion_in_Variable = model(xs_new.to(DEVICE), [])
            else:
                emotion_in_Variable = model(xs, lengths)
            loss = 0.0
            if hp.decoder_type == 'Attention':
                loss += F.cross_entropy(
                    emotion_in_Variable[:, :hp.num_emotion], emo.to(DEVICE))
            print('loss = {}'.format(loss.item()))
        elif hp.text_based:
            xs, lengths, ts, ts_onehot, ts_onehot_LS, ts_lengths, emo, \
                emo_onehot, emo_onehot_LS, temp = sort_pad(
                    hp.batch_size, xs, lengths, ts, ts_onehot, ts_onehot_LS,
                    ts_lengths, emo, emo_onehot, emo_onehot_LS, temp,
                    temp_length)
            emotion_in_Variable = model(ts.to(DEVICE), ts_lengths.to(DEVICE))
            loss = 0.0
            if hp.decoder_type == 'Attention':
                loss += F.cross_entropy(
                    emotion_in_Variable[:, :hp.num_emotion], emo.to(DEVICE))
            print('loss = {}'.format(loss.item()))
        elif hp.combined:
            xs, lengths, ts, ts_onehot, ts_onehot_LS, ts_lengths, emo, \
                emo_onehot, emo_onehot_LS, temp = sort_pad(
                    hp.batch_size, xs, lengths, ts, ts_onehot, ts_onehot_LS,
                    ts_lengths, emo, emo_onehot, emo_onehot_LS, temp,
                    temp_length)
            if hp.baseline_type == 'CNN_BLSTM' or hp.baseline_type == 'lim_BLSTM':
                onehot_length = temp.size(2)
                xs_new = torch.zeros((hp.batch_size, 750, onehot_length))
                for i in range(hp.batch_size):
                    feature_length = temp.size(1)
                    if feature_length > 750:
                        xs_new.data[:, :750, :] = temp.data[:, :750, :]
                    else:
                        xs_new.data[:, :feature_length, :] = \
                            temp.data[:, :feature_length, :]
                emotion_in_Variable = model(xs_new.to(DEVICE), [],
                                            ts.to(DEVICE),
                                            ts_lengths.to(DEVICE))
            else:
                emotion_in_Variable = model(xs.to(DEVICE), lengths,
                                            ts.to(DEVICE),
                                            ts_lengths.to(DEVICE))
            loss = 0.0
            if hp.decoder_type == 'Attention':
                loss += F.cross_entropy(
                    emotion_in_Variable[:, :hp.num_emotion], emo.to(DEVICE))
            print('loss = {}'.format(loss.item()))
        elif hp.combined_ASR or hp.ASR_based:
            xs, lengths, ts, ts_onehot, ts_onehot_LS, ts_lengths, emo, \
                emo_onehot, emo_onehot_LS, temp = sort_pad(
                    hp.batch_size, xs, lengths, ts, ts_onehot, ts_onehot_LS,
                    ts_lengths, emo, emo_onehot, emo_onehot_LS, temp,
                    temp_length)
            if hp.baseline_type == 'CNN_BLSTM' or hp.baseline_type == 'lim_BLSTM':
                onehot_length = temp.size(2)
                xs_new = torch.zeros((hp.batch_size, 750, onehot_length))
                for i in range(hp.batch_size):
                    feature_length = temp.size(1)
                    if feature_length > 750:
                        xs_new.data[:, :750, :] = temp.data[:, :750, :]
                    else:
                        xs_new.data[:, :feature_length, :] = \
                            temp.data[:, :feature_length, :]
                youtput_in_Variable, emotion_in_Variable = model(
                    xs, lengths, ts_onehot, emo_onehot, xs_new.to(DEVICE))
            else:
                youtput_in_Variable, emotion_in_Variable = model(
                    xs, lengths, ts_onehot, emo_onehot, [])
            loss = 0.0
            if hp.decoder_type == 'Attention':
                # weighted multi-task loss: emotion CE + smoothed ASR loss
                loss += F.cross_entropy(
                    emotion_in_Variable[:, :hp.num_emotion],
                    emo.to(DEVICE)) * 0.8
                print(loss)
                for k in range(hp.batch_size):
                    num_labels = ts_lengths[k]
                    loss += label_smoothing_loss(
                        youtput_in_Variable[k][:num_labels],
                        ts_onehot_LS[k][:num_labels], 1) / num_labels * 0.2
            print('loss = {}'.format(loss.item()))
        sys.stdout.flush()

        optimizer.zero_grad()
        # backward
        loss.backward()
        clip = 1.0
        torch.nn.utils.clip_grad_value_(model.parameters(), clip)
        # optimizer update
        optimizer.step()
        loss.detach()
        torch.cuda.empty_cache()
synthfile = 'tools/sound/noisy.wav'
synth = wave.open(synthfile, 'wb')
synth.setnchannels(1)
synth.setsampwidth(2)
synth.setframerate(samplingrate)

remain = sound.getnframes()
while remain > 0:
    s = min(chunk, remain)
    # read frames
    data_sound = sound.readframes(s)
    data_noise = noise.readframes(s)
    # convert, mixing in int32 to avoid overflow before casting back
    ary_sound = sp.fromstring(data_sound, sp.int16)
    ary_noise = sp.fromstring(data_noise, sp.int16)
    int32_ary_sound = sp.int32(ary_sound)
    int32_ary_noise = sp.int32(ary_noise)
    ary2 = sp.int16(int32_ary_sound + int32_ary_noise)
    data2 = ary2.tostring()
    synth.writeframes(data2)
    remain = remain - s
sound.close()
noise.close()
synth.close()

infile = 'tools/sound/noisy.wav'
signal, params = read_signal(infile, WINSIZE)
nf = len(signal) // (WINSIZE // 2) - 1
p = pyaudio.PyAudio()
filename = "121_dr_bpm080_4-4_rock.wav"
wf = wave.open(filename, "rb")
stream = p.open(format=pyaudio.paInt16,
                channels=1,
                rate=44100,
                frames_per_buffer=CHUNK,
                input=True,
                output=True)

# real-time record and playback
while stream.is_active():
    try:
        input = stream.read(CHUNK)
        num_data = fromstring(input, dtype=int16)
        print(num_data)
        samples = np.array(num_data)
        output = stream.write(samples)
        pyplot.plot(samples)
        pyplot.draw()
        pyplot.pause(0.05)
        pyplot.cla()
    except KeyboardInterrupt:
        pyplot.close()
        break

if wf.getnchannels() == 2:
    left = num_data[::2]
    right = num_data[1::2]
def mainLoop():
    global terminated, fftSpec
    # start recording
    audio = pyaudio.PyAudio()
    stream = audio.open(format=FORMAT, channels=CHANNELS, rate=FS,
                        input=True, frames_per_buffer=CHUNK)

    ### Application creation
    # Main window
    mainWindow = myMainWindow()
    mainWindow.show()
    mainWindow.setWindowTitle("Spectrum Analyzer")  # Title
    mainWindow.resize(1300, 500)                    # Size

    ### Canvas
    centralWid = QtGui.QWidget()
    mainWindow.setCentralWidget(centralWid)
    layH = QtGui.QHBoxLayout()
    centralWid.setLayout(layH)
    LeftWidget = QtGui.QWidget()
    RightWidget = QtGui.QWidget()
    layH.addWidget(LeftWidget)
    layH.addWidget(RightWidget)
    LeftlayV = QtGui.QVBoxLayout()
    LeftWidget.setLayout(LeftlayV)
    RightlayV = QtGui.QVBoxLayout()
    RightWidget.setLayout(RightlayV)

    ### Original wave display widget
    waveWid = pg.PlotWidget(title="Original Wave")
    origWave = waveWid.getPlotItem()
    origWave.setMouseEnabled(y=False)  # disable panning along the y-axis
    origWave.setYRange(-10000, 10000)
    origWave.setXRange(0, 512, padding=0)
    ### Axis
    specAxis = origWave.getAxis("bottom")
    specAxis.setLabel("Samples")
    LeftlayV.addWidget(waveWid)

    ### Spectrum display widget
    fftWid = pg.PlotWidget(title="FFT")
    fftItem = fftWid.getPlotItem()
    fftItem.setMouseEnabled(y=False)  # disable panning along the y-axis
    fftItem.setYRange(0, 3000)
    fftItem.setXRange(0, FS / 2, padding=0)
    ### Axis
    specAxis = fftItem.getAxis("bottom")
    specAxis.setLabel("Frequency [Hz]")
    LeftlayV.addWidget(fftWid)

    ### Spectrogram
    specWid = pg.PlotWidget()
    specItem = pg.ImageItem()
    specWid.addItem(specItem)
    img_array = np.zeros((100, CHUNK // 2))
    # bipolar colormap
    pos = np.array([0., 1., 0.5, 0.25, 0.75])
    color = np.array([[0, 255, 255, 255], [255, 255, 0, 255],
                      [0, 0, 0, 255], (0, 0, 255, 255), (255, 0, 0, 255)],
                     dtype=np.ubyte)
    cmap = pg.ColorMap(pos, color)
    lut = cmap.getLookupTable(0.0, 1.0, 256)
    # set colormap
    specItem.setLookupTable(lut)
    specItem.setLevels([-50, 40])
    # set up the correct scaling for the y-axis
    freq = np.arange((CHUNK / 2) + 1) / (float(CHUNK) / FS)
    yscale = 1.0 / (img_array.shape[1] / freq[-1])
    specItem.scale((1. / FS) * CHUNK, yscale)
    specWid.setLabel('left', 'Frequency', units='Hz')
    RightlayV.addWidget(specWid)

    ### Window display
    mainWindow.show()

    frames = []
    open('recorded.bin', 'w').close()
    file = open('recorded.bin', 'a')
    while not terminated:
        orig = np.array([])
        # get audio samples
        data = stream.read(CHUNK)
        frames.append(data)
        orig = fromstring(data, dtype="int16")
        # Transform to frequency domain (FFT)
        originalfft = np.array(fft(orig))
        fftSpec = abs(originalfft) / (CHUNK / 2)
        fftSpec = fftSpec[:int(CHUNK / 2)]
        xf = 1.0 * np.arange(0, FS / 2., FS / (1. * CHUNK))
        # Spectrogram
        img_array = np.roll(img_array, -1, 0)
        img_array[-1:] = 10.0 * np.log10(fftSpec)
        # Plotting graphs
        origWave.plot(orig, clear=True)
        fftItem.plot(xf, fftSpec, clear=True)
        specItem.setImage(img_array, autoLevels=False)
        QtGui.QApplication.processEvents()

    stream.stop_stream()
    stream.close()
    audio.terminate()
    file.close()

    print('Writing wav data')
    waveFile = wave.open('recorded.wav', 'wb')
    waveFile.setnchannels(CHANNELS)
    waveFile.setsampwidth(audio.get_sample_size(FORMAT))
    waveFile.setframerate(FS)
    waveFile.writeframes(b''.join(frames))
    waveFile.close()
    print('Finished writing wav data')
    print('Exiting...')
    sys.exit()
import scipy as sp
from scipy.io.wavfile import read, write
import pylibpd as pd

num_chans = 1
sampling_rate = 44100

# open a Pure Data patch
m = pd.PdManager(num_chans, num_chans, sampling_rate, 1)
patch = pd.libpd_open_patch("ring_mod.pd")

# get the default frame size
frame_size = pd.libpd_blocksize()

# read audio file
audio = read("drums.wav")[1]

# process each frame
out = sp.array([], dtype=sp.int16)
for i in range(0, len(audio), frame_size):
    f = audio[i:i + frame_size]
    p = m.process(f)
    p = sp.fromstring(p, sp.int16)
    out = sp.hstack((out, p))

# close the patch
pd.libpd_close_patch(patch)

# write the audio file to disk
write("drums_ringmod.wav", 44100, out)
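# On a current NumPy the fromstring call above fails; a sketch of the same
# frame loop with np.frombuffer, assuming m.process() returns raw
# little-endian int16 bytes as in the snippet:
import numpy as np

chunks = []
for i in range(0, len(audio), frame_size):
    frame = audio[i:i + frame_size]
    chunks.append(np.frombuffer(m.process(frame), dtype=np.int16))
out = np.concatenate(chunks)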
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import scipy as sp
import scipy.signal as sig
import wave

fsamp = 44100.0
fpass = 5000.0
fstop = 6000.0
wp = fpass / (fsamp / 2)
ws = fstop / (fsamp / 2)

# design a digital IIR low-pass (1 dB passband ripple, 30 dB stopband)
b, a = sig.iirdesign(wp, ws, 1, 30)
# freqz, not freqs: iirdesign returns digital filter coefficients
w, h = sig.freqz(b, a)

filename = 'white_noise2.wav'
wf = wave.open(filename, 'rb')
n = wf.getnframes()
s = wf.readframes(n)
x = sp.fromstring(s, sp.int16)

y = sig.lfilter(b, a, x)

o_filename = 'filtered.wav'
wf_o = wave.open(o_filename, 'wb')
wf_o.setnchannels(1)
wf_o.setsampwidth(2)
wf_o.setframerate(44100)
wf_o.writeframes(sp.int16(y).tostring())
wf_o.close()
wf.close()
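# A quick verification sketch of the designed low-pass, plotting the
# magnitude response against the 5 kHz passband edge (matplotlib is
# assumed to be available):
import matplotlib.pyplot as plt
import numpy as np

w, h = sig.freqz(b, a, worN=2048)
freq_hz = w * fsamp / (2 * np.pi)
plt.plot(freq_hz, 20 * np.log10(np.maximum(abs(h), 1e-12)))
plt.axvline(fpass, color='r', linestyle='--')
plt.xlabel('Frequency [Hz]')
plt.ylabel('Gain [dB]')
plt.show()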
import wave
import struct
from scipy import fromstring, int16

# output_sin.wav on the Desktop
wavf = '/Users/itounagamitsu/Desktop/AM_python/output_sin.wav'
wr = wave.open(wavf, 'rb')

# collect the wave file's properties
ch = wr.getnchannels()
width = wr.getsampwidth()
fr = wr.getframerate()
fn = wr.getnframes()

print("Channel: ", ch)
print("Sample width: ", width)
print("Frame Rate: ", fr)
print("Frame num: ", fn)
print("Params: ", wr.getparams())
print("Total time: ", 1.0 * fn / fr)

# read the raw wave data and convert it to numbers
data = wr.readframes(wr.getnframes())
wr.close()
X = fromstring(data, dtype=int16)
import math
import numpy as np
import random
import pickle
import wave
from scipy import fromstring, int16
from scipy.fftpack import fft

batch_size = 100
num_of_kinds = 200

dataset = []

dummy = np.zeros((2, 2752512))
wr = wave.open('./data/test01_ch1.WAV', 'rb')
dummy[0, :] = fromstring(wr.readframes(wr.getnframes()), dtype=int16)
wr = wave.open('./data/test01_ch2.WAV', 'rb')
dummy[1, :] = fromstring(wr.readframes(wr.getnframes()), dtype=int16)
dataset.append(dummy)

dummy = np.zeros((2, 2752512))
wr = wave.open('./data/test02_ch1.WAV', 'rb')
dummy[0, :] = fromstring(wr.readframes(wr.getnframes()), dtype=int16)
wr = wave.open('./data/test02_ch2.WAV', 'rb')
dummy[1, :] = fromstring(wr.readframes(wr.getnframes()), dtype=int16)
dataset.append(dummy)

dummy = np.zeros((2, 2752512))
wr = wave.open('./data/test03_ch1.WAV', 'rb')
dummy[0, :] = fromstring(wr.readframes(wr.getnframes()), dtype=int16)
wr = wave.open('./data/test03_ch2.WAV', 'rb')
list3 = [0, 0, 0, 1, 0, 0, 0, 0]
list4 = [0, 0, 0, 0, 1, 0, 0, 0]
list5 = [0, 0, 0, 0, 0, 1, 0, 0]
list6 = [0, 0, 0, 0, 0, 0, 1, 0]
list7 = [0, 0, 0, 0, 0, 0, 0, 1]

i = 0
j = 1
k = 0

### training data set
while i <= 315:
    while j <= 44:
        wr = wave.open('./sound_' + str(i) + '/output/' + str(j) + '.wav', 'rb')
        data = wr.readframes(wr.getnframes())
        num_data = fromstring(data, dtype='int16') / 32768.0
        sound.append(num_data)
        j += 1
    i += 45
    j = 1

### training answer (label) data set
while k <= 352:
    if k <= 43:
        answer.append(list0)
    elif k >= 44 and k <= 87:
        answer.append(list1)
    elif k >= 88 and k <= 131:
        answer.append(list2)
    elif k >= 132 and k <= 175:
        answer.append(list3)
def clean(self, returnString):
    """Convert a comma-delimited string into a numpy array."""
    return scipy.fromstring(returnString, sep=',')
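# Note this uses fromstring's text-parsing mode (explicit sep), which is
# distinct from the deprecated binary mode. A usage sketch, assuming
# `parser` is an instance of the owning class:
arr = parser.clean('1.5,2.0,3.25')
print(arr)  # -> [1.5   2.    3.25]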
inPCM.setchannels(ch)
inPCM.setrate(fs)
inPCM.setformat(alsaaudio.PCM_FORMAT_S16_LE)
inPCM.setperiodsize(chunk)

signal = zeros(fftLen, dtype=float)
tmpSound = 0
up = 0
down = 0
max = 0
count = 0

while 1:
    # check whether the baby is crying or not
    length, data = inPCM.read()
    num_data = fromstring(data, dtype="int16")
    signal = roll(signal, -chunk)
    signal[-chunk:] = num_data
    fftspec = fft(signal)
    # bin 146 roughly corresponds to the baby's crying pitch (~1000 Hz)
    babySound = abs(fftspec[146] * signal_scale)

    # first, check for the baby's minimum sound level
    if babySound > 50 and babySound < 150:
        totalStartTimer = time.time()  # timer start
        totalEndTimer = 0
        while totalEndTimer - totalStartTimer < 50:  # check during 50 seconds
            totalEndTimer = time.time()
            if babySound >= tmpSound:
                # if the recent sound is louder than the previous one,
                # suspect the baby's crying
                up = 1
def readWave(self, stream):
    while True:
        # normalize each 1024-sample chunk to the range -1..+1
        num_data = fromstring(stream.read(1024), dtype='int16') / 32768.0
        print(num_data)
import wave
from scipy import fromstring, int16

# compute the number of frames corresponding to 90 seconds
ch = 2
fr = 44100
width = 2

file = open('original.txt', 'r')  # open in read mode
string = file.read()              # read everything at once
X = fromstring(string, dtype=int16)
print(X)

# generate the output data
outf = './test_original.wav'
w = wave.Wave_write(outf)
w.setnchannels(2)
w.setsampwidth(width)  # 2 bytes per sample matches the int16 data
w.setframerate(44100)
w.setnframes(fr * ch)
# w.writeframes(X)
w.close()
# regard a word as less confident if it has more than one syllable
if wordConfi / wordCount > confidence_threshold:
    confidences[wordPy] = wordConfi / wordCount
    timestamps[wordPy] = [wordT0, wordT1]

# extract basic information from the wave file
audio = wave.open(wav_file, 'r')
ch = audio.getnchannels()
width = audio.getsampwidth()
fr = audio.getframerate()
fn = audio.getnframes()
data = audio.readframes(fn)
audioContent = fromstring(data, dtype=int16)

# split the wave file
if not os.path.exists('./output'):
    os.makedirs('./output')
for name in timestamps:
    segment = audioContent[int(timestamps[name][0] * fr * ch):
                           int(timestamps[name][1] * fr * ch)]
    outd = struct.pack("h" * len(segment), *segment)
    ww = wave.open('./output/' + name + '.wav', 'w')
    ww.setnchannels(ch)
    ww.setsampwidth(width)
    ww.setframerate(fr)
    ww.writeframes(outd)
    ww.close()
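# Sketch: the slice above indexes flat interleaved samples, so aligning
# segment boundaries to whole frames keeps stereo samples paired
# (ch == 2 means the layout is L,R,L,R,...); all names come from the
# snippet above:
start = int(timestamps[name][0] * fr) * ch
end = int(timestamps[name][1] * fr) * ch
segment = audioContent[start:end]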
def spectrumAnalyzer():
    global fftLen, capture_setting, signal_scale

    ##########################
    # Capture Sound from Mic #
    ##########################
    ch = capture_setting["ch"]
    fs = capture_setting["fs"]
    chunk = capture_setting["chunk"]

    inPCM = alsaaudio.PCM(alsaaudio.PCM_CAPTURE)
    inPCM.setchannels(ch)
    inPCM.setrate(fs)
    inPCM.setformat(alsaaudio.PCM_FORMAT_S16_LE)
    inPCM.setperiodsize(chunk)

    signal = zeros(fftLen, dtype=float)

    ##########
    # Layout #
    ##########
    app = QtGui.QApplication([])
    app.quitOnLastWindowClosed()

    mainWindow = QtGui.QMainWindow()
    mainWindow.setWindowTitle("Spectrum Analyzer")
    mainWindow.resize(800, 300)

    centralWid = QtGui.QWidget()
    mainWindow.setCentralWidget(centralWid)

    lay = QtGui.QVBoxLayout()
    centralWid.setLayout(lay)

    specWid = pg.PlotWidget(name="spectrum")
    specItem = specWid.getPlotItem()
    specItem.setMouseEnabled(y=False)
    specItem.setYRange(0, 1000)
    specItem.setXRange(0, fftLen / 2, padding=0)

    specAxis = specItem.getAxis("bottom")
    specAxis.setLabel("Frequency [Hz]")
    specAxis.setScale(fs / 2. / (fftLen / 2 + 1))
    hz_interval = 500
    newXAxis = (arange(int(fs / 2 / hz_interval)) + 1) * hz_interval
    oriXAxis = newXAxis / (fs / 2. / (fftLen / 2 + 1))
    specAxis.setTicks([zip(oriXAxis, newXAxis)])

    lay.addWidget(specWid)

    mainWindow.show()

    # update
    for time in range(100):
        length, data = inPCM.read()
        num_data = fromstring(data, dtype="int16")
        signal = roll(signal, - chunk)
        signal[- chunk:] = num_data
        fftspec = fft(signal)
        print signal[1800:1900]
        specItem.plot(abs(fftspec[1:fftLen / 2 + 1] * signal_scale),
                      clear=True)
        QtGui.QApplication.processEvents()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pyaudio
from scipy import fromstring, int16
from matplotlib import pyplot as pl

CHUNK = 1024
p = pyaudio.PyAudio()
input_device_index = 0
stream = p.open(format=pyaudio.paInt16,
                channels=2,
                rate=44100,
                frames_per_buffer=CHUNK,
                input=True)

input = stream.read(CHUNK)
print 'input:' + str(len(input))
num_data = fromstring(input, dtype='int16') / 32768.0
print 'num_data:' + str(len(num_data))
wf2 = wave.open(filename2, "rb")
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=wf.getframerate(),
                output=True)
stream2 = p.open(format=p.get_format_from_width(wf2.getsampwidth()),
                 channels=wf2.getnchannels(),
                 rate=wf2.getframerate(),
                 output=True)

data2 = wf2.readframes(CHUNK)
data = wf.readframes(CHUNK)
num_data = fromstring(data, dtype=int16)
num_data2 = fromstring(data2, dtype=int16)

# plain wav playback
while data != '':
    list = np.array(num_data)
    list2 = np.array(num_data2)
    stream.write(list2)
    data = wf.readframes(CHUNK)
    data2 = wf2.readframes(CHUNK)
    num_data = fromstring(data, dtype=int16)
    num_data2 = fromstring(data2, dtype=int16)

"""real-time record & playback
while stream.is_active():
    try:
def readTxt(self, filePath, fileName):
    self.fileDateStr = 170816  # UTC date file was created.
    if fileName == 'hylebos1baseline.txt':
        fileNum = 1
    elif fileName == 'hylebos1_a.txt':
        fileNum = 2
    elif fileName == 'hylebos1_b.txt':
        fileNum = 3
    elif fileName == 'hylebos2baseline.txt':
        fileNum = 4
    elif fileName == 'hylebos2_a.txt':
        fileNum = 5
    self.fileNum = fileNum    # File number in set.
    self.descript = fileName  # Description of the test.
    self.minor = ''           # Minor note.
    self.major = ''           # Major note.
    self.scanChCount = 8      # Number of channels in each A/D scan.
    self.chCount = 8          # Number of channels written to the file.
    self.n = 4096             # Number of samples in the FFT time series.
    self.fs = 4096            # (Hz) FFT sampling frequency.
    self.xmitFund = 4         # (Hz) Transmit square wave fundamental
                              # frequency.

    # Read IP measurements from a text file.
    with open(filePath, 'r') as fh:
        # Number of lines in the file.
        lineCount = self.countLines(fh)
        # Rewind the pointer in the file back to the beginning.
        fh.seek(0)
        # Initialize the packet counter.
        p = -1
        # Initialize the sample index.
        s = -1
        # Each file contains a file header of length 10 lines,
        # followed by packets. Packets contain (1 + n) lines each.
        self.pktCount = int(lineCount / (1 + self.n))
        # Dimension arrays indexed by packet.
        self.dimArrays()
        self.rCurrentMeas = 0.5  # (Ohm) resistance.
        self.rExtraSeries = 0.0  # (Ohm).
        # Voltage measurement names.
        # 0-indexed by channel number.
        self.measStr = [
            'currentMeas',
            'R1-R2',
            'N/A',
            'N/A',
            'N/A',
            'N/A',
            'N/A',
            'N/A'
        ]
        # Construct arrays using the scipy package.
        # 5B amplifier maximum of the input range (V).
        # 0-indexed by channel number.
        self.In5BHi = sp.array([1, 10, 10, 10, 10, 10, 10, 10])
        # 5B amplifier maximum of the output range (V).
        # 0-indexed by channel number.
        self.Out5BHi = sp.array([5, 5, 5, 5, 5, 5, 5, 5])
        # MccDaq board AIn() maximum of the input range (V).
        # 0-indexed by channel number.
        self.ALoadQHi = sp.array([5, 5, 5, 5, 5, 5, 5, 5])

        for lidx, line in enumerate(fh, 1):
            # Strip off trailing newline characters.
            line = line.rstrip('\n')
            if line[0] == '$':
                # Increment the packet index.
                p += 1
                # Packet number.
                spl = line[2:].split(',')
                self.pkt[p] = int(spl[0])
                # CPU UTC date and time strings.
                self.cpuDTStr[p].d = '170816'
                self.cpuDTStr[p].t = '160000.00'
                # Translate to datetime object.
                self.cpuDT[p] = self.str2DateTime(self.cpuDTStr[p])
                # GPS UTC date and time strings,
                # and latitude and longitude fixes.
                self.gpsDTStr[p].d = '170816'
                self.gpsDTStr[p].t = '160000.000'
                self.lat[p] = 0.
                self.longi[p] = 0.
                # Translate to datetime object.
                self.gpsDT[p] = self.str2DateTime(self.gpsDTStr[p])
                assignArr = sp.array([1, 1, 1, 1, 1, 1, 1, 1])
                # Count of measurements clipped on the high end of
                # the MccDaq board's input range.
                self.clipHi[:, p] = assignArr
                # Count of measurements clipped on the low end of
                # the MccDaq board's input range.
                self.clipLo[:, p] = assignArr
                # Mean measurement value over the packet as a
                # percentage of the AIn() half range.
                self.meanPct[:, p] = assignArr
                # (pct) Mean value of sample measurements above
                # or equal to the mean.
                self.meanUpPct[:, p] = assignArr
                # (pct) Mean value of sample measurements below the mean.
                self.meanDnPct[:, p] = assignArr
                # Count of measurements above or equal to the mean.
                self.countUp[:, p] = assignArr
                # Count of measurements below the mean.
                self.countDn[:, p] = assignArr
                # Set the sample index to 0 to start.
                s = 0
            elif line[0] != '*':
                # Read in raw voltage values.
                self.raw[:, p, s] = sp.fromstring(line, dtype=float, sep=',')
                if s == self.n - 1:
                    # Reset the counter to below zero.
                    s = -1
                else:
                    # Increment the sample counter for the next read.
                    s += 1

    # After the file has been read, perform some calculations.
    self.postRead()
import matplotlib.pyplot as plt
import wave
import glob
import scipy as sp

for fname in glob.glob("CAL500_wav/*.wav"):
    print(fname)
    wo = wave.open(fname, 'rb')
    chunk = 65536
    data = sp.fromstring(wo.readframes(chunk), sp.int16)
    srate = wo.getframerate()
    nFFT = 1024
    window = sp.hamming(nFFT)
    fig, ax = plt.subplots(1)
    fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
    ax.set_axis_off()
    Pxx, freq, bins, im = ax.specgram(data, NFFT=nFFT, Fs=srate,
                                      noverlap=512, window=window)
    # replace only the extension so the PNG lands next to the WAV
    plt.savefig(fname.replace('.wav', '.png'))
    plt.close(fig)
# Imports this snippet needs (taken from the names used below; the OpenPNM
# helpers extend/trim and GenericNetwork are assumed from openpnm 2.x)
import os
import scipy as sp
import scipy.sparse
from pathlib import Path
from openpnm.network import GenericNetwork
from openpnm.topotools import extend, trim


def load(cls, path, node_file="throats_cellsThroatsGraph_Nodes.txt",
         graph_file="throats_cellsThroatsGraph.txt", network=None,
         voxel_size=None, return_geometry=False):
    r"""
    Loads network data from an iMorph processed image stack

    Parameters
    ----------
    path : string
        The path of the folder where the subfiles are held

    node_file : string
        The file that describes the pores and throats; the default iMorph
        name is throats_cellsThroatsGraph_Nodes.txt

    graph_file : string
        The file that describes the connectivity of the network; the
        default iMorph name is throats_cellsThroatsGraph.txt

    network : OpenPNM Network Object
        The OpenPNM Network onto which the data should be loaded.  If no
        network is supplied then an empty import network is created and
        returned.

    voxel_size : float
        Allows the user to define a voxel size different than what is
        contained in the node_file.  The value must be in meters.

    return_geometry : Boolean
        If True, then all geometry-related properties are removed from
        the Network object and added to a GenericGeometry object.  In this
        case the method returns a tuple containing (network, geometry).
        If False (default) then the returned Network will contain all
        properties that were in the original file.  In this case, the user
        can call the ``split_geometry`` method explicitly to perform the
        separation.

    Returns
    -------
    If no Network object is supplied then one will be created and
    returned.  If return_geometry is True, then a tuple is returned
    containing both the network and a geometry object.
    """
    path = Path(path)  # accept either a string or a Path
    node_file = os.path.join(path.resolve(), node_file)
    graph_file = os.path.join(path.resolve(), graph_file)
    # Parse the nodes file
    with open(node_file, 'r') as file:
        Np = sp.fromstring(file.readline().rsplit('=')[1],
                           sep='\t', dtype=int)[0]
        vox_size = sp.fromstring(file.readline().rsplit(')')[1],
                                 sep='\t')[0]
        # The network is always recreated to prevent errors
        network = GenericNetwork(Np=Np, Nt=0)
        # Define expected properties
        network['pore.volume'] = sp.nan
        scrap_lines = [file.readline() for line in range(4)]  # skip headers
        while True:
            vals = file.readline().split('\t')
            if len(vals) == 1:
                break
            network['pore.volume'][int(vals[0])] = float(vals[3])
            if 'pore.' + vals[2] not in network.labels():
                network['pore.' + vals[2]] = False
            network['pore.' + vals[2]][int(vals[0])] = True
    if voxel_size is None:
        voxel_size = vox_size * 1.0E-6  # file stores the value in microns
    if voxel_size < 0:
        raise Exception('Error - Voxel size must be specified in '
                        'the Nodes file or as a keyword argument.')
    # Parse the graph file
    with open(graph_file, 'r') as file:
        # Define expected properties
        network['pore.coords'] = sp.zeros((Np, 3)) * sp.nan
        network['pore.types'] = sp.nan
        network['pore.color'] = sp.nan
        network['pore.radius'] = sp.nan
        network['pore.dmax'] = sp.nan
        network['pore.node_number'] = sp.nan
        # Scan file to get pore coordinate data
        scrap_lines = [file.readline() for line in range(3)]  # skip headers
        line = file.readline()
        xmax = 0.0
        ymax = 0.0
        zmax = 0.0
        node_num = 0
        while line != 'connectivity table\n':
            vals = sp.fromstring(line, sep='\t')
            xmax = vals[1] if vals[1] > xmax else xmax
            ymax = vals[2] if vals[2] > ymax else ymax
            zmax = vals[3] if vals[3] > zmax else zmax
            network['pore.coords'][int(vals[0]), :] = vals[1:4]
            network['pore.types'][int(vals[0])] = vals[4]
            network['pore.color'][int(vals[0])] = vals[5]
            network['pore.radius'][int(vals[0])] = vals[6]
            network['pore.dmax'][int(vals[0])] = vals[7]
            network['pore.node_number'][int(vals[0])] = node_num
            node_num += 1
            line = file.readline()
        # Scan file to get to the connectivity data
        scrap_lines.append(file.readline())  # skip line
        # Create a sparse lil_matrix to incrementally build the
        # adjacency matrix
        lil = sp.sparse.lil_matrix((Np, Np), dtype=int)
        while True:
            vals = sp.fromstring(file.readline(), sep='\t', dtype=int)
            if len(vals) <= 1:
                break
            lil.rows[vals[0]] = vals[2:]
            lil.data[vals[0]] = sp.ones(vals[1])
    # Fix any negative volumes or distances so they are 1 voxel/micron
    network['pore.volume'][sp.where(network['pore.volume'] < 0)[0]] = 1.0
    network['pore.radius'][sp.where(network['pore.radius'] < 0)[0]] = 1.0
    network['pore.dmax'][sp.where(network['pore.dmax'] < 0)[0]] = 1.0
    # Add the adjacency matrix to the OpenPNM network; the upper triangle
    # yields each connection exactly once
    conns = sp.sparse.triu(lil, k=1, format='coo')
    network.update({'throat.all': sp.ones(len(conns.col), dtype=bool)})
    network['throat.conns'] = sp.vstack([conns.row, conns.col]).T
    # iMorph stores throats as nodes; connect their neighbors directly,
    # copy the pore props onto the new throats, then trim the throat-pores
    network['pore.to_trim'] = False
    network['pore.to_trim'][network.pores('*throat')] = True
    Ts = network.pores('to_trim')
    new_conns = network.find_neighbor_pores(pores=Ts, flatten=False)
    extend(network=network, throat_conns=new_conns, labels='new_conns')
    for item in network.props('pore'):
        item = item.split('.')[1]
        arr = sp.ones_like(network['pore.' + item])[0]
        arr = sp.tile(A=arr, reps=[network.Nt, 1]) * sp.nan
        network['throat.' + item] = sp.squeeze(arr)
        network['throat.' + item][network.throats('new_conns')] = \
            network['pore.' + item][Ts]
    trim(network=network, pores=Ts)
    # Set up boundary pores
    x_coord, y_coord, z_coord = sp.hsplit(network['pore.coords'], 3)
    network['pore.front_boundary'] = sp.ravel(x_coord == 0)
    network['pore.back_boundary'] = sp.ravel(x_coord == xmax)
    network['pore.left_boundary'] = sp.ravel(y_coord == 0)
    network['pore.right_boundary'] = sp.ravel(y_coord == ymax)
    network['pore.bottom_boundary'] = sp.ravel(z_coord == 0)
    network['pore.top_boundary'] = sp.ravel(z_coord == zmax)
    # Remove any pores that got classified as a boundary pore but
    # weren't labeled a border_cell_face
    ps = sp.where(~sp.in1d(network.pores('*_boundary'),
                           network.pores('border_cell_face')))[0]
    ps = network.pores('*_boundary')[ps]
    for side in ['front', 'back', 'left', 'right', 'top', 'bottom']:
        network['pore.' + side + '_boundary'][ps] = False
    # Set the internal label
    network['pore.internal'] = False
    network['pore.internal'][network.pores('*_boundary', mode='not')] = True
    # Add props to border cell face throats from their pores
    Ts = sp.where(network['throat.conns'][:, 1] >
                  network.pores('border_cell_face')[0] - 1)[0]
    faces = network['throat.conns'][Ts, 1]
    for item in network.props('pore'):
        item = item.split('.')[1]
        network['throat.' + item][Ts] = network['pore.' + item][faces]
    network['pore.volume'][faces] = 0.0
    # Apply unit conversions
    # TODO: Determine if radius and dmax are indeed microns and not voxels
    network['pore.coords'] = network['pore.coords'] * 1e-6
    network['pore.radius'] = network['pore.radius'] * 1e-6
    network['pore.dmax'] = network['pore.dmax'] * 1e-6
    network['pore.volume'] = network['pore.volume'] * voxel_size**3
    network['throat.coords'] = network['throat.coords'] * 1e-6
    network['throat.radius'] = network['throat.radius'] * 1e-6
    network['throat.dmax'] = network['throat.dmax'] * 1e-6
    network['throat.volume'] = network['throat.volume'] * voxel_size**3
    return network.project
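# Aside: a minimal, self-contained sketch of the adjacency-matrix step used
# above, assuming only numpy/scipy (the 4-pore neighbor lists are made-up
# illustrative data, not from any iMorph file). Each pore's neighbor list is
# written into a lil_matrix row, and taking the upper triangle (k=1) yields
# each connection exactly once as a (row, col) throat pair.
import numpy as np
import scipy.sparse as sprs

Np_demo = 4
neighbors = {0: [1, 2], 1: [0, 3], 2: [0], 3: [1]}  # symmetric neighbor lists
lil_demo = sprs.lil_matrix((Np_demo, Np_demo), dtype=int)
for pore, nbrs in neighbors.items():
    lil_demo.rows[pore] = list(nbrs)        # column indices for this row
    lil_demo.data[pore] = [1] * len(nbrs)   # matching non-zero entries
coo_demo = sprs.triu(lil_demo, k=1, format='coo')  # keep each pair once
throat_conns = np.vstack([coo_demo.row, coo_demo.col]).T
print(throat_conns)  # -> [[0 1], [0 2], [1 3]]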
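# Usage sketch (hedged): assuming OpenPNM 2.x, where this loader is bound as
# the classmethod openpnm.io.iMorph.load. The folder name and voxel size are
# placeholders, not values taken from this document.
from openpnm.io import iMorph

proj = iMorph.load(path='my_imorph_output',  # folder holding the two txt files
                   voxel_size=5.0e-6)        # override node-file value (meters)
net = proj.network                           # the GenericNetwork built above
print(net.num_pores(), net.num_throats())
print(net.num_pores('internal'), 'internal pores')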