def readIM7(self):
    """ Load the image from the buffer/file (for .IM7 files) """
    extn = os.path.splitext(self.loadfile)[1]
    if not extn.lower() == ".im7":
        return
    try:
        buff, atts = self._get_Buffer_andAttributeList()
        attdict = ReadIM.extra.att2dict(atts)
        self.data["attributes"].update(attdict)
        vbuff, buff = ReadIM.extra.buffer_as_array(buff)
        if buff.image_sub_type > 0:
            raise TypeError("buffer does not contain an image: type = %s"
                            % buff.image_sub_type)
        self._set_buffer(buff)
        vbuff2 = self.map_i(vbuff)
        self.data["I"] = np.ma.masked_equal(vbuff2, 0.0)
    finally:
        env = dir()
        if "vbuff" in env:
            del vbuff  # Yes, you need to do this to stop memory leaks
        if "vbuff2" in env:
            del vbuff2
        if "buff" in env:
            ReadIM.DestroyBuffer(buff)
            del buff
        if "atts" in env:
            ReadIM.DestroyAttributeListSafe(atts)
            del atts
        if "attdict" in env:
            del attdict
def readPIVvc7_fieldsOnly(filename):
    """ Read the vc7 file and return the velocities only """
    buff, piv_atts = ReadIM.extra.get_Buffer_andAttributeList(filename)
    data, buff = ReadIM.extra.buffer_as_array(buff)
    data = -1. * data * buff.scaleI.factor

    # transpose: the software expects (ny, nx) tables
    data = np.transpose(data)
    data = np.moveaxis(data, -1, 0)

    u1 = data[1, :, ::-1]
    v1 = data[0, :, ::-1]
    u2 = data[3, :, ::-1]
    v2 = data[2, :, ::-1]

    # Careful: we need to extract a portion of the field.
    # DaVis stores the entire grid of data for each camera,
    # although each camera may not contribute to the entire FOV.
    # DaVis puts zeros where the camera doesn't see.
    u1 = u1[55:-55, 0:615]
    v1 = v1[55:-55, 0:615]
    u2 = u2[50:-50, 420:1202]
    v2 = v2[50:-50, 420:1202]

    ReadIM.DestroyBuffer(buff)
    ReadIM.DestroyAttributeListSafe(piv_atts)

    # return only velocities
    return (u1, v1, u2, v2)
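# Usage sketch (not from the original source): a minimal driver showing how
# readPIVvc7_fieldsOnly above might be called. The filename "B00001.vc7" is
# hypothetical, and the printed shapes depend on the hard-coded crop windows
# in readPIVvc7_fieldsOnly, which are specific to the author's two-camera setup.
def _demo_readPIVvc7_fieldsOnly(filename="B00001.vc7"):
    u1, v1, u2, v2 = readPIVvc7_fieldsOnly(filename)
    # cropped velocity components, one (u, v) pair per camera
    print("camera 1:", u1.shape, v1.shape)
    print("camera 2:", u2.shape, v2.shape)
    return u1, v1, u2, v2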
def test_bufferAlt(self):
    import numpy as np
    window = [(0, 10), (10, 0)]
    buffAlt = ReadIM.newBuffer(window, 3, 3, 2)
    buffAlt.scaleX.offset = np.array(1)
    buffNew = ReadIM.BufferTypeAlt(buffAlt)
    assert not (buffAlt is buffNew)
    assert type(buffNew.scaleX.offset) is float, \
        "{0}".format(type(buffNew.scaleX.offset))
def readPIVvc7(filename):
    """ Read DaVis vc7 file """
    ###############################################################
    # Be careful: the data might be transposed and x and y inverted;
    # it depends on the calibration, which is read internally by DaVis.
    ###############################################################
    buff, piv_atts = ReadIM.extra.get_Buffer_andAttributeList(filename)
    u1, v1, u2, v2 = readPIVvc7_fieldsOnly(filename)

    # create the grids
    x1 = np.linspace(
        buff.scaleY.offset + buff.scaleY.factor * buff.ny * buff.vectorGrid,
        buff.scaleY.offset, buff.ny)
    y1 = np.linspace(
        buff.scaleX.offset,
        buff.scaleX.offset + buff.scaleX.factor * buff.nx * buff.vectorGrid,
        buff.nx)

    if buff.nf == 1:
        # single-frame file: scale the raw buffer directly,
        # as done in readPIVvc7_fieldsOnly
        dataout, buff = ReadIM.extra.buffer_as_array(buff)
        dataout = -1. * dataout * buff.scaleI.factor
        ReadIM.DestroyBuffer(buff)
        ReadIM.DestroyAttributeListSafe(piv_atts)
        return (x1, y1, dataout[0, :, :].T, dataout[1, :, :].T, buff)
    elif buff.nf == 2:
        # New DaVis 10 procedure for storing average data.
        x2 = x1
        y2 = y1
        # Careful: we need to extract a portion of the field.
        # DaVis stores the entire grid of data for each camera,
        # although each camera may not contribute to the entire FOV.
        # DaVis puts zeros where the camera doesn't see.
        x1 = x1[0:615]
        y1 = y1[55:-55]
        x2 = x2[420:1202]
        y2 = y2[50:-50]
        ReadIM.DestroyBuffer(buff)
        ReadIM.DestroyAttributeListSafe(piv_atts)
        # plt.contourf(x2, y2, u2)
        # plt.plot(u2[:, 775])
        # plt.plot(u2[::-1, 775])
        return (x1, y1, u1, v1, x2, y2, u2, v2, buff)
def test_create_im7(self):
    window = [(0, 10), (10, 0)]
    for tp in ReadIM.BUFFER_FORMATS:
        buffAlt = ReadIM.newBuffer(window, 3, 3, 2, tp, 2)
        buff, errorcode = ReadIM.createBuffer(buffAlt)
        err_msg = 'Error creating buffer'
        assert_equal(errorcode, 1, err_msg)

        # data
        arr, buff = ReadIM.buffer_as_array(buff)
        # fill each plane with its index so the data increase monotonically
        for i in range(len(arr)):
            arr[i] = i

        attributes = dict(type=ReadIM.BUFFER_FORMATS[tp])
        atts = ReadIM.load_AttributeList(attributes)

        ReadIM.WriteIM7('packed_im{0}.im7'.format(tp), True, buff, atts.next)
        ReadIM.WriteIM7('not_packed_im{0}.im7'.format(tp), False, buff,
                        atts.next)

        ReadIM.DestroyBuffer(buff)
        ReadIM.DestroyAttributeListSafe(atts)
def _get_Buffer_(self):
    """ Get buffer from self.loadfile.

    Only valid for IM7 and VC7 filetypes.

    Returns
    -------
    buff: BufferType
    """
    return ReadIM.get_Buffer_andAttributeList(self.loadfile, atts=None)[0]
def read_INSTPIV_fieldsOnly(filename):
    """ Read the vc7 file for DaVis versions older than DaVis 10 """
    buff, piv_atts = ReadIM.extra.get_Buffer_andAttributeList(filename)
    data, _ = ReadIM.extra.buffer_as_array(buff)

    if (data.shape[0] <= buff.nf * 2):
        raise ValueError(
            'Davis data format not supported by this routine. Try readPIVvc7')

    # get the velocity - Frame 1
    # we discard the choices > 3 that correspond to secondary peaks.
    choices = np.where(data[0] > 3, 3, data[0])
    u1 = np.empty(choices.shape)
    v1 = np.empty(choices.shape)
    # the different data buffers contain the successively computed
    # displacements in a cumulative way
    for (i, j), choice in np.ndenumerate(choices):
        u1[i, j] = np.sum(
            data[1:2 * int(choice) + 1:2][:, i, j]) * buff.scaleI.factor
        v1[i, j] = np.sum(
            data[2:2 * int(choice) + 2:2][:, i, j]) * buff.scaleI.factor

    # get the velocity - Frame 2
    # we discard the choices > 3 that correspond to secondary peaks.
    choices = np.where(data[10] > 3, 3, data[10])
    u2 = np.empty(choices.shape)
    v2 = np.empty(choices.shape)
    # the different data buffers contain the successively computed
    # displacements in a cumulative way
    for (i, j), choice in np.ndenumerate(choices):
        u2[i, j] = np.sum(
            data[11:2 * int(choice) + 11:2][:, i, j]) * buff.scaleI.factor
        v2[i, j] = np.sum(
            data[12:2 * int(choice) + 12:2][:, i, j]) * buff.scaleI.factor

    ReadIM.DestroyBuffer(buff)
    ReadIM.DestroyAttributeListSafe(piv_atts)

    # return only velocities
    return u1, v1, u2, v2
def test_load_im(self):
    for f in files:
        buff, atts = ReadIM.extra.get_Buffer_andAttributeList(f)
        arr, buff2 = ReadIM.extra.buffer_as_array(buff)
        err_msg = 'Problem unpacking file to array {0}'.format(buff)
        if buff.image_sub_type <= 0:
            components = 1
        else:
            components = ReadIM.core.GetVectorComponents(buff.image_sub_type)
        components *= buff.nf
        assert_equal(arr.shape, (components, buff.ny, buff.nx), err_msg)
        ReadIM.extra.att2dict(atts)
        ReadIM.DestroyBuffer(buff)
        ReadIM.DestroyAttributeListSafe(atts)
def _get_Buffer_andAttributeList(self):
    """ Get buffer and attribute list from self.loadfile.

    Only valid for IM7 and VC7 filetypes.

    Returns
    -------
    buff: BufferType
    atts: Attribute List
    """
    try:
        return ReadIM.get_Buffer_andAttributeList(self.loadfile)
    except IOError:
        raise IOError("Problem loading file: {0}. Message: {1}".format(
            self.loadfile, sys.exc_info()[1]))
def _im7_writer(self, dst):
    """ Write the current buffer as masked to dst """
    buff = ReadIM.BufferTypeAlt(self.data["buffer"])
    buff.isFloat = self.dtype == np.float32
    try:
        # DaVis objects
        buff, error_code = ReadIM.extra.createBuffer(buff)
        if error_code > 1:
            raise IOError("Error code %s=%s" %
                          (ReadIM.ERROR_CODES[error_code], error_code))
        vbuff, buff2 = ReadIM.extra.buffer_as_array(buff)
        self.update_attributes_with_scales()
        atts = ReadIM.load_AttributeList(self.data["attributes"])
        vbuff[:] = 0
        setzero = np.logical_not(self.data["I"].mask)
        vbuff[:] = self.map_I(self.data["I"]) * setzero

        # write from buffer
        err = ReadIM.WriteIM7(dst, True, buff2, atts.next)
        if err:
            codes = dict([(getattr(ReadIM.core, f), f)
                          for f in dir(ReadIM.core)
                          if f.find("IMREAD_ERR") == 0])
            if err == 1:
                print("Note that you cannot write into the top-level "
                      "directory for some strange reason")
            raise IOError("Error writing file: %s=%s" % (err, codes[err]))
        print("Created %s" % os.path.basename(dst))
    # clean up
    finally:
        env = dir()
        if "vbuff" in env:
            del vbuff  # Yes, you need to do this to stop memory leaks
        if "buff" in env:
            ReadIM.DestroyBuffer(buff)
            del buff
        if "buff2" in env:
            ReadIM.DestroyBuffer(buff2)
            del buff2
        if "atts" in env:
            ReadIM.DestroyAttributeListSafe(atts)
            del atts
def load_vc7(path, time=0):
    """ Load a DaVis file (tested for im7 & vc7) into an xarray Dataset.
    Valid only for 2D PIV cases.

    In case of images (image type = 0):
        Im = scaled image intensities on scaled x/z coordinates
    In case of 2D vector fields (A.IType = 1, 2 or 3):
        u = scaled vx-components of vectors
        v = scaled vy-components of vectors
        chc = choice field (vector types 1 and 3)
    """
    # you need to clear the buffers afterwards to prevent memory leaks
    buff, vatts = ReadIM.extra.get_Buffer_andAttributeList(path)
    v_array, buff1 = ReadIM.extra.buffer_as_array(buff)

    nx = buff.nx
    nz = buff.nz
    ny = buff.ny

    # set data range:
    baseRangeX = np.arange(nx)
    baseRangeY = np.arange(ny)
    # baseRangeZ = np.arange(nz)
    lhs1 = (baseRangeX + 0.5) * buff.vectorGrid * buff.scaleX.factor \
        + buff.scaleX.offset  # x-range
    lhs2 = (baseRangeY + 0.5) * buff.vectorGrid * buff.scaleY.factor \
        + buff.scaleY.offset  # y-range
    lhs3 = 0
    lhs4 = 0
    mask = 0

    if buff.image_sub_type <= 0:
        # grayvalue image format
        [lhs1, lhs2] = np.meshgrid(lhs1, lhs2)
        lhs3 = v_array[0, :, :]
        lhs4 = v_array[1, :, :]
        Im = xr.DataArray(v_array,
                          dims=('frame', 'z', 'x'),
                          coords={'x': lhs1[0, :],
                                  'z': lhs2[:, 0],
                                  'frame': [0, 1]})
        data = xr.Dataset({'Im': Im})

    elif buff.image_sub_type == 2:
        # simple 2D vector format: (vx, vy)
        # Calculate vector position and components
        [lhs1, lhs2] = np.meshgrid(lhs1, lhs2)
        # lhs1 = np.transpose(lhs1)
        # lhs2 = np.transpose(lhs2)
        lhs3 = v_array[0, :, :] * buff.scaleI.factor + buff.scaleI.offset
        lhs4 = v_array[1, :, :] * buff.scaleI.factor + buff.scaleI.offset
        if buff.scaleY.factor < 0.0:
            lhs4 = -lhs4
        lhs3 = lhs3[:, :, np.newaxis]
        lhs4 = lhs4[:, :, np.newaxis]
        u = xr.DataArray(lhs3,
                         dims=('z', 'x', 't'),
                         coords={'x': lhs1[0, :],
                                 'z': lhs2[:, 0],
                                 't': [time]})
        v = xr.DataArray(lhs4,
                         dims=('z', 'x', 't'),
                         coords={'x': lhs1[0, :],
                                 'z': lhs2[:, 0],
                                 't': [time]})
        data = xr.Dataset({'u': u, 'v': v})
        # plt.quiver(lhs1, lhs2, lhs3, lhs4)

    elif buff.image_sub_type == 3 or buff.image_sub_type == 1:
        # normal 2D vector format + peak: sel + 4*(vx, vy) (+ peak)
        # Calculate vector position and components
        [lhs1, lhs2] = np.meshgrid(lhs1, lhs2)
        # lhs1 = np.transpose(lhs1)
        # lhs2 = np.transpose(lhs2)
        lhs3 = lhs1 * 0
        lhs4 = lhs2 * 0
        # Get choice
        maskData = np.int8(v_array[0, :, :])
        # Build best vectors from choice field
        for i in range(5):
            mask = maskData == (i + 1)
            if (i < 4):
                # get best vectors
                dat = v_array[2 * i + 1, :, :]
                lhs3[mask] = dat[mask]
                dat = v_array[2 * i + 2, :, :]
                lhs4[mask] = dat[mask]
            else:
                # get interpolated vectors
                dat = v_array[7, :, :]
                lhs3[mask] = dat[mask]
                dat = v_array[8, :, :]
                lhs4[mask] = dat[mask]
        lhs3 = lhs3 * buff.scaleI.factor + buff.scaleI.offset
        lhs4 = lhs4 * buff.scaleI.factor + buff.scaleI.offset
        # Display vector field
        if buff.scaleY.factor < 0.0:
            lhs4 = -1 * lhs4
        lhs3 = lhs3.T[:, :, np.newaxis]
        lhs4 = lhs4.T[:, :, np.newaxis]
        chc = maskData.T[:, :, np.newaxis]
        u = xr.DataArray(lhs3,
                         dims=('x', 'y', 't'),
                         coords={'x': lhs1[0, :],
                                 'y': lhs2[:, 0],
                                 't': [time]})
        v = xr.DataArray(lhs4,
                         dims=('x', 'y', 't'),
                         coords={'x': lhs1[0, :],
                                 'y': lhs2[:, 0],
                                 't': [time]})
        chc = xr.DataArray(chc,
                           dims=('x', 'y', 't'),
                           coords={'x': lhs1[0, :],
                                   'y': lhs2[:, 0],
                                   't': [time]})
        data = xr.Dataset({'u': u, 'v': v, 'chc': chc})

    if buff.image_sub_type > 0:
        data.attrs = ReadIM.extra.att2dict(vatts)
        data.attrs['variables'] = ['x', 'y', 'u', 'v']
        data.attrs['units'] = ['mm', 'mm', 'm/s', 'm/s']
        data.attrs['dt'] = int(data.attrs['FrameDt0'][:-3])
        data.attrs['files'] = path

    # clean memory
    ReadIM.DestroyBuffer(buff1)
    del buff1
    ReadIM.DestroyBuffer(buff)
    del buff
    ReadIM.DestroyAttributeListSafe(vatts)
    del vatts
    return data
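# Usage sketch (not from the original source): load a hypothetical vc7 file with
# load_vc7 above and inspect the resulting xarray Dataset. The variable names
# ("u", "v", "Im") follow the branches of load_vc7; the path is an assumption.
def _demo_load_vc7(path="B00001.vc7", time=0):
    ds = load_vc7(path, time=time)
    if "u" in ds:
        # vector file (image_sub_type 1, 2 or 3): scaled velocity components
        print(ds["u"].dims, ds["u"].shape, "mean u =", float(ds["u"].mean()))
    else:
        # grayvalue image (image_sub_type <= 0): intensities per frame
        print(ds["Im"].dims, ds["Im"].shape)
    return ds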
def ReadDavis(path, time=0):
    """ Load a DaVis file (tested for im7 & vc7) into an xarray Dataset.
    Valid only for 2D PIV cases.

    In case of images (image type = 0):
        Im = scaled image intensities on scaled x/z coordinates
    In case of 2D vector fields (A.IType = 1, 2 or 3):
        u = scaled vx-components of vectors
        v = scaled vy-components of vectors
        chc = choice field (vector types 1 and 3)
    """
    # you need to clear the buffers afterwards to prevent memory leaks
    buff, vatts = ReadIM.extra.get_Buffer_andAttributeList(path)
    v_array, buff1 = ReadIM.extra.buffer_as_array(buff)

    nx = buff.nx
    # nz = buff.nz  # flake8: not used
    ny = buff.ny

    # set data range:
    baseRangeX = np.arange(nx)
    baseRangeY = np.arange(ny)
    # baseRangeZ = np.arange(nz)  # flake8: not used
    lhs1 = (baseRangeX + 0.5) * buff.vectorGrid * buff.scaleX.factor \
        + buff.scaleX.offset  # x-range
    lhs2 = (baseRangeY + 0.5) * buff.vectorGrid * buff.scaleY.factor \
        + buff.scaleY.offset  # y-range
    lhs3 = 0
    lhs4 = 0
    mask = 0

    if buff.image_sub_type <= 0:
        # grayvalue image format
        # build the 2D coordinate grids (as in load_vc7) before indexing them
        [lhs1, lhs2] = np.meshgrid(lhs1, lhs2)
        lhs3 = v_array[0, :, :]
        lhs4 = v_array[1, :, :]
        Im = xr.DataArray(
            v_array,
            dims=("frame", "z", "x"),
            coords={"x": lhs1[0, :], "z": lhs2[:, 0], "frame": [0, 1]},
        )
        data = xr.Dataset({"Im": Im})

    elif buff.image_sub_type == 2:
        # simple 2D vector format: (vx, vy)
        # Calculate vector position and components
        [lhs1, lhs2] = np.meshgrid(lhs1, lhs2)
        # lhs1 = np.transpose(lhs1)
        # lhs2 = np.transpose(lhs2)
        lhs3 = v_array[0, :, :] * buff.scaleI.factor + buff.scaleI.offset
        lhs4 = v_array[1, :, :] * buff.scaleI.factor + buff.scaleI.offset
        if buff.scaleY.factor < 0.0:
            lhs4 = -lhs4
        lhs3 = lhs3[:, :, np.newaxis]
        lhs4 = lhs4[:, :, np.newaxis]
        u = xr.DataArray(
            lhs3,
            dims=("z", "x", "t"),
            coords={"x": lhs1[0, :], "z": lhs2[:, 0], "t": [time]},
        )
        v = xr.DataArray(
            lhs4,
            dims=("z", "x", "t"),
            coords={"x": lhs1[0, :], "z": lhs2[:, 0], "t": [time]},
        )
        data = xr.Dataset({"u": u, "v": v})
        # plt.quiver(lhs1, lhs2, lhs3, lhs4)

    elif buff.image_sub_type == 3 or buff.image_sub_type == 1:
        # normal 2D vector format + peak: sel + 4*(vx, vy) (+ peak)
        # Calculate vector position and components
        [lhs1, lhs2] = np.meshgrid(lhs1, lhs2)
        # lhs1 = np.transpose(lhs1)
        # lhs2 = np.transpose(lhs2)
        lhs3 = lhs1 * 0
        lhs4 = lhs2 * 0
        # Get choice
        maskData = v_array[0, :, :]
        # Build best vectors from choice field
        for i in range(5):
            mask = maskData == (i + 1)
            if i < 4:
                # get best vectors
                dat = v_array[2 * i + 1, :, :]
                lhs3[mask] = dat[mask]
                dat = v_array[2 * i + 2, :, :]
                lhs4[mask] = dat[mask]
            else:
                # get interpolated vectors
                dat = v_array[7, :, :]
                lhs3[mask] = dat[mask]
                dat = v_array[8, :, :]
                lhs4[mask] = dat[mask]
        lhs3 = lhs3 * buff.scaleI.factor + buff.scaleI.offset
        lhs4 = lhs4 * buff.scaleI.factor + buff.scaleI.offset
        # Display vector field
        if buff.scaleY.factor < 0.0:
            lhs4 = -1 * lhs4
        mask = maskData == 0
        lhs3 = lhs3[:, :, np.newaxis]
        lhs4 = lhs4[:, :, np.newaxis]
        maskData = maskData[:, :, np.newaxis]
        u = xr.DataArray(
            lhs3,
            dims=("z", "x", "t"),
            coords={"x": lhs1[0, :], "z": lhs2[:, 0], "t": [time]},
        )
        v = xr.DataArray(
            lhs4,
            dims=("z", "x", "t"),
            coords={"x": lhs1[0, :], "z": lhs2[:, 0], "t": [time]},
        )
        chc = xr.DataArray(
            maskData,
            dims=("z", "x", "t"),
            coords={"x": lhs1[0, :], "z": lhs2[:, 0], "t": [time]},
        )
        data = xr.Dataset({"u": u, "v": v, "chc": chc})

    data.attrs["Info"] = ReadIM.extra.att2dict(vatts)

    # clean memory
    ReadIM.DestroyBuffer(buff1)
    del buff1
    ReadIM.DestroyBuffer(buff)
    del buff
    ReadIM.DestroyAttributeListSafe(vatts)
    del vatts
    return data
def _set_buffer(self, buff):
    buff = ReadIM.BufferTypeAlt(buff, False, immutable=True)
    self.data["buffer"] = {}
    ReadIM.extra.obj2dict(buff, self.data["buffer"])
def convert_vc7(vc7_folder_path, dt):
    """ Converts a 2-dimensional, 2-component VC7 file into the HDF5 format.

    Parameters
    ----------
    vc7_folder_path : string
        Path to a folder containing a collection of vc7 files.

    Returns
    -------
    tuple

    Author(s)
    ---------
    Jia Cheng Hu
    """
    # Import all the necessary libraries
    import ReadIM
    import glob

    # Get all file paths
    all_vc7_path = glob.glob(os.path.join(vc7_folder_path, '*.vc7'))

    # Get information of the first frame for initialization
    first_vbuff, first_vattr = ReadIM.get_Buffer_andAttributeList(
        all_vc7_path[0])
    first_vattr_dict = ReadIM.att2dict(first_vattr)

    # Initialize storage dictionary for each camera
    data_all_cam = []
    for n_cam in range(first_vbuff.nf):
        u = np.zeros((first_vbuff.nx, first_vbuff.ny, len(all_vc7_path)))
        v = np.zeros((first_vbuff.nx, first_vbuff.ny, len(all_vc7_path)))

        dx = float(first_vattr_dict['FrameScaleX' + str(n_cam)].splitlines()
                   [0]) * first_vbuff.vectorGrid / 1000
        dy = -float(first_vattr_dict['FrameScaleY' + str(n_cam)].splitlines()
                    [0]) * first_vbuff.vectorGrid / 1000
        x0 = float(
            first_vattr_dict['FrameScaleX' + str(n_cam)].splitlines()[1]) / 1000
        y0 = float(
            first_vattr_dict['FrameScaleY' + str(n_cam)].splitlines()[1]) / 1000

        x = x0 + dx * (np.arange(first_vbuff.nx) + 1 / 2)
        y = y0 - dy * (np.arange(first_vbuff.ny) - 1 / 2)
        xx, yy = np.meshgrid(x, y, indexing='ij')

        data_all_cam.append(piv.Field2D(dt, xx, yy, [u, v]))

    # Load velocity vector fields
    for i, vc7_path in enumerate(all_vc7_path):
        vbuff, vattr = ReadIM.get_Buffer_andAttributeList(vc7_path)
        v_array = ReadIM.buffer_as_array(vbuff)[0]

        for n_cam, data in enumerate(data_all_cam):
            # PIV mask
            mask = np.ones((first_vbuff.ny, first_vbuff.nx))
            mask[v_array[n_cam * 10] == 0] = np.nan

            # Vector scaling
            scaleI = float(
                ReadIM.att2dict(vattr)['FrameScaleI' +
                                       str(n_cam)].splitlines()[0])

            # Load velocity
            data[0, :, :, i] = (v_array[1 + n_cam * 10] * scaleI * mask).T
            data[1, :, :, i] = -(v_array[2 + n_cam * 10] * scaleI * mask).T

        # Release the DaVis buffers for this file to avoid memory leaks
        ReadIM.DestroyBuffer(vbuff)
        ReadIM.DestroyAttributeListSafe(vattr)

    ReadIM.DestroyBuffer(first_vbuff)
    ReadIM.DestroyAttributeListSafe(first_vattr)

    return tuple(data_all_cam)
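# Usage sketch (not from the original source): the folder path and time step dt
# below are hypothetical, and piv.Field2D is assumed to come from the package
# that convert_vc7 above belongs to; this only illustrates the calling pattern.
def _demo_convert_vc7(folder="./vc7_run", dt=1e-3):
    fields_per_camera = convert_vc7(folder, dt)
    for n_cam, field in enumerate(fields_per_camera):
        print("camera", n_cam, "->", type(field).__name__)
    return fields_per_camera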