Example 1
def read_surface(fname, verbose=None):
    """Load a Freesurfer surface mesh in triangular format

    Parameters
    ----------
    fname : str
        The name of the file containing the surface.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    rr : array, shape=(n_vertices, 3)
        Coordinate points.
    tris : int array, shape=(n_faces, 3)
        Triangulation (each line contains indexes for three points which
        together form a face).
    """
    with open(fname, "rb", buffering=0) as fobj:  # buffering=0 for np bug
        magic = _fread3(fobj)
        if (magic == 16777215) or (magic == 16777213):  # Quad file or new quad
            nvert = _fread3(fobj)
            nquad = _fread3(fobj)
            coords = np.fromfile(fobj, ">i2", nvert * 3).astype(np.float64)
            coords = coords.reshape(-1, 3) / 100.0
            quads = _fread3_many(fobj, nquad * 4)
            quads = quads.reshape(nquad, 4)
            #
            #   Face splitting follows
            #
            faces = np.zeros((2 * nquad, 3), dtype=int)
            nface = 0
            for quad in quads:
                if (quad[0] % 2) == 0:
                    faces[nface] = quad[0], quad[1], quad[3]
                    nface += 1
                    faces[nface] = quad[2], quad[3], quad[1]
                    nface += 1
                else:
                    faces[nface] = quad[0], quad[1], quad[2]
                    nface += 1
                    faces[nface] = quad[0], quad[2], quad[3]
                    nface += 1

        elif magic == 16777214:  # Triangle file
            create_stamp = fobj.readline()
            _ = fobj.readline()  # analysis:ignore
            vnum = np.fromfile(fobj, ">i4", 1)[0]
            fnum = np.fromfile(fobj, ">i4", 1)[0]
            coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
            faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
            logger.info('Triangle file: %s nvert = %s ntri = %s'
                        % (create_stamp.strip(), len(coords), len(faces)))
        else:
            raise ValueError("%s does not appear to be a Freesurfer surface"
                             % fname)

    coords = coords.astype(np.float64)  # XXX: due to mayavi bug on mac 32bits
    return coords, faces
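A minimal usage sketch for read_surface; the subject path is an assumption for illustration, and the function still relies on the module's _fread3 helpers and logger.

# Hypothetical usage; the path is an assumption.
rr, tris = read_surface('subjects/sample/surf/lh.white')
print(rr.shape)    # (n_vertices, 3) vertex coordinates
print(tris.shape)  # (n_faces, 3) vertex indices per triangle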
Example 2
    def _read_frame(self, file_num, frame_num, raw_flag):
        pdict = self.flist[file_num]
        with open(pdict['fname'], 'rb') as fptr:
            num_data = np.fromfile(fptr, dtype='i4', count=1)[0]

            accum = [pdict['ones_accum'], pdict['multi_accum']]
            offset = [0, 0]
            size = [0, 0]

            if frame_num == 0:
                size = [accum[0][frame_num], accum[1][frame_num]]
            else:
                offset = [accum[0][frame_num-1], accum[1][frame_num-1]]
                size[0] = accum[0][frame_num] - accum[0][frame_num - 1]
                size[1] = accum[1][frame_num] - accum[1][frame_num - 1]

            fptr.seek(1024 + num_data*8 + offset[0]*4, 0)
            place_ones = np.fromfile(fptr, dtype='i4', count=size[0])
            fptr.seek(1024 + num_data*8 + accum[0][-1]*4 + offset[1]*4, 0)
            place_multi = np.fromfile(fptr, dtype='i4', count=size[1])
            fptr.seek(1024 + num_data*8 + accum[0][-1]*4 + accum[1][-1]*4 + offset[1]*4, 0)
            count_multi = np.fromfile(fptr, dtype='i4', count=size[1])

        frame = np.zeros(pdict['num_pix'], dtype='i4')
        np.add.at(frame, place_ones, 1)
        np.add.at(frame, place_multi, count_multi)
        frame *= pdict['geom'].unassembled_mask
        if not raw_flag:
            frame = self._assemble_frame(frame, file_num)
        return frame
Example 3
def mnist(ntrain=60000,ntest=10000,onehot=True):
	data_dir = os.path.join(datasets_dir,'mnist/')
	fd = open(os.path.join(data_dir,'train-images.idx3-ubyte'),'rb')
	loaded = np.fromfile(file=fd,dtype=np.uint8)
	trX = loaded[16:].reshape((60000,28*28)).astype(float)

	fd = open(os.path.join(data_dir,'train-labels.idx1-ubyte'),'rb')
	loaded = np.fromfile(file=fd,dtype=np.uint8)
	trY = loaded[8:].reshape((60000))

	fd = open(os.path.join(data_dir,'t10k-images.idx3-ubyte'),'rb')
	loaded = np.fromfile(file=fd,dtype=np.uint8)
	teX = loaded[16:].reshape((10000,28*28)).astype(float)

	fd = open(os.path.join(data_dir,'t10k-labels.idx1-ubyte'),'rb')
	loaded = np.fromfile(file=fd,dtype=np.uint8)
	teY = loaded[8:].reshape((10000))

	trX = trX/255.
	teX = teX/255.

	trX = trX[:ntrain]
	trY = trY[:ntrain]

	teX = teX[:ntest]
	teY = teY[:ntest]

	if onehot:
		trY = one_hot(trY, 10)
		teY = one_hot(teY, 10)
	else:
		trY = np.asarray(trY)
		teY = np.asarray(teY)

	return trX,teX,trY,teY
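A hedged usage sketch for the loader above; it assumes datasets_dir and one_hot are defined elsewhere in the module and that the four ubyte files sit under datasets_dir/mnist/.

# Hypothetical usage; shapes follow from the reshapes above.
trX, teX, trY, teY = mnist(ntrain=60000, ntest=10000, onehot=True)
print(trX.shape, trY.shape)  # (60000, 784) (60000, 10)
print(teX.shape, teY.shape)  # (10000, 784) (10000, 10)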
Example 4
def loadlocal_mnist(images_path, labels_path):
    """ Read MNIST from ubyte files.

    Parameters
    ----------
    images_path : str
        path to the test or train MNIST ubyte file
    labels_path : str
        path to the test or train MNIST class labels file

    Returns
    -------
    images : [n_samples, n_pixels] numpy.array
        Pixel values of the images.
    labels : [n_samples] numpy array
        Target class labels

    """
    with open(labels_path, 'rb') as lbpath:
        magic, n = struct.unpack('>II',
                                 lbpath.read(8))
        labels = np.fromfile(lbpath,
                             dtype=np.uint8)
    with open(images_path, 'rb') as imgpath:
        magic, num, rows, cols = struct.unpack(">IIII",
                                               imgpath.read(16))
        images = np.fromfile(imgpath,
                             dtype=np.uint8).reshape(len(labels), 784)

    return images, labels
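A short usage sketch; the file names are assumptions matching the standard MNIST distribution.

# Hypothetical usage; paths are assumptions.
X, y = loadlocal_mnist(images_path='train-images-idx3-ubyte',
                       labels_path='train-labels-idx1-ubyte')
print(X.shape, y.shape)  # (60000, 784) (60000,)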
Example 5
def getCoincidences(fTimes, fChans, gate, radius, heraldChan):
  bufRes = 156.25e-12
  gate = int(gate/bufRes)
  radius = int(radius/bufRes)
  coin = np.zeros([8,8], dtype = np.uint64)
  times = np.fromfile(fTimes, dtype = np.uint64)
  chans = np.fromfile(fChans, dtype = np.uint8)
  #print "len(times), len(chans)", len(times), len(chans)
  for chan in range(8,16):
    colIdx = np.where(chans==chan)[0]
    for idx in colIdx:
      #print "chans[idx]: %d"%chans[idx]
      #print "few chans: ",chans[idx-3:idx+3]
      #print "few times: ",times[idx-3:idx+3]
      j = idx + 1
      while (j < len(times)) and (chans[j]==heraldChan) and (times[j] - gate <= times[idx]):
        i = idx - 1
        while (i >= 0):
          if (times[i] + radius >= times[idx]) and (chans[idx] != chans[i]) and chans[i] < 8:
            row = chans[i]
            col = chans[idx] % 8
            coin[row, col] += 1
            break
          elif (times[i] + radius <= times[idx]):
            row = heraldChan % 8 #works even if for some reason we had the rows plugged into channels 8-15 of the tagger
            col = chans[idx] % 8
            coin[row, col] += 1
            break
          i -= 1
        j += 1
  return coin
Example 6
def read_morph_data(filepath):
    """Read a Freesurfer morphometry data file.

    This function reads in what Freesurfer internally calls "curv" file types,
    (e.g. ?h. curv, ?h.thickness), but as that has the potential to cause
    confusion where "curv" also refers to the surface curvature values,
    we refer to these files as "morphometry" files with PySurfer.

    Parameters
    ----------
    filepath : str
        Path to morphometry file

    Returns
    -------
    curv : numpy array
        Vector representation of surface morphometry values

    """
    with open(filepath, "rb") as fobj:
        magic = _fread3(fobj)
        if magic == 16777215:
            vnum = np.fromfile(fobj, ">i4", 3)[0]
            curv = np.fromfile(fobj, ">f4", vnum)
        else:
            vnum = magic
            _fread3(fobj)
            curv = np.fromfile(fobj, ">i2", vnum) / 100
    return curv
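A minimal usage sketch, assuming a standard FreeSurfer subject layout; the path is illustrative only.

# Hypothetical usage; the path is an assumption.
curv = read_morph_data('subjects/sample/surf/lh.curv')
print(curv.shape)  # one morphometry value per surface vertex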
Example 7
    def Read(self):

        #return numpy.ones((256, 819)).astype('float32'), numpy.ones(256).astype('int32')

        with open(self.featureFile,"rb") as f:

            dt = numpy.dtype([('numSamples',(numpy.int32,1)),('sampPeriod',(numpy.int32,1)),('sampSize',(numpy.int16,1)),('sampKind',(numpy.int16,1))])
            header =  numpy.fromfile(f,dt.newbyteorder('>' if self.byteOrder==ByteOrder.BigEndian else '<'),count=1)

            numSamples = header[0]['numSamples']
            sampPeriod = header[0]['sampPeriod']
            sampSize   = header[0]['sampSize']
            sampKind   = header[0]['sampKind']

            # print 'Num samples = {}'.format(numSamples)
            # print 'Sample period = {}'.format(sampPeriod)
            # print 'Sample size = {}'.format(sampSize)
            # print 'Sample kind = {}'.format(sampKind)
            dt = numpy.dtype([('sample',(numpy.float32,sampSize//4))])  # integer division: sampSize is in bytes, floats are 4 bytes
            samples = numpy.fromfile(f,dt.newbyteorder('>' if self.byteOrder==ByteOrder.BigEndian else '<'),count=numSamples)

        self._markDone()

        if self.labelFile is None:
            labels = None
        else:
            labels = ReadLabel(self.labelFile)

        return samples[:]['sample'], labels
Example 8
def _read_volume_info(fobj):
    """An implementation of nibabel.freesurfer.io._read_volume_info, since old
    versions of nibabel (<=2.1.0) don't have it.
    """
    volume_info = dict()
    head = np.fromfile(fobj, '>i4', 1)
    if not np.array_equal(head, [20]):  # Read two more int32 values
        head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)])
        if not np.array_equal(head, [2, 0, 20]):
            warnings.warn("Unknown extension code.")
            return volume_info

    volume_info['head'] = head
    for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
                'zras', 'cras']:
        pair = fobj.readline().decode('utf-8').split('=')
        if pair[0].strip() != key or len(pair) != 2:
            raise IOError('Error parsing volume info.')
        if key in ('valid', 'filename'):
            volume_info[key] = pair[1].strip()
        elif key == 'volume':
            volume_info[key] = np.array(pair[1].split()).astype(int)
        else:
            volume_info[key] = np.array(pair[1].split()).astype(float)
    # Ignore the rest
    return volume_info
Example 9
    def _load_ahf_particle_block(self, f):
        """Load the particles for the next halo described in particle file f"""
        ng = len(self.base.gas)
        nds = len(self.base.dark) + len(self.base.star)
        nparts = int(f.readline().split()[0])

        if self.isnew:
            # Python 2 used isinstance(f, file); assumes gzip is imported.
            if not isinstance(f, gzip.GzipFile):
                data = (np.fromfile(
                    f, dtype=int, sep=" ", count=nparts*2).reshape(nparts, 2))[:, 0]
            else:
                # unfortunately with gzipped files there does not
                # seem to be an efficient way to load nparts lines
                data = np.zeros(nparts, dtype=int)
                for i in range(nparts):
                    data[i] = int(f.readline().split()[0])

            if self._use_iord :
                data = self._iord_to_fpos[data]
            else :
                hi_mask = data >= nds
                data[np.where(hi_mask)] -= nds
                data[np.where(~hi_mask)] += ng
        else:
            if not isinstance(f, gzip.GzipFile):
                data = np.fromfile(f, dtype=int, sep=" ", count=nparts)
            else:
                # see comment above on gzipped files
                data = np.zeros(nparts, dtype=int)
                for i in range(nparts):
                    data[i] = int(f.readline())
        data.sort()
        return data
Example 10
def train_generator():
	random.seed(seedvalue)
	valtruth = list()
	valdata = list()
	endflag = False
	for i in range(0,len(events)):
		for j in range(0,len(truthfiles)):
			filename = events[i]+truthfiles[j]
			try:
				truth = np.fromfile(filename,dtype=np.short)
				truth = truth.reshape(len(truth)//wirelength,wirelength)
				for k in range(0,len(datafiles[j])):
					filename = events[i]+datafiles[j][k]
					try:
						data = np.fromfile(filename,dtype=np.short)
						data = data.reshape(len(data)//wirelength,wirelength)
						data = np.concatenate((padarray,data,padarray),axis=0)

						for l in range(batchsize,len(truth)+1,batchsize):
							if i>=len(events)-1 and j>=len(truthfiles)-1 and k>=len(datafiles[j])-1 and l>=len(truth): endflag=True
							if random.uniform(0,1) > droptrainingwires or endflag:
								start = l - batchsize
								end = l
								t = list()
								for m in range(start,end): t.append((np_utils.to_categorical(truth[m],numlabels)))
								d = parse_attributes(data[start:end+numattributes-1])
								if random.uniform(0,1) < reservevalidation and not endflag:
									valtruth.extend(t)
									valdata.extend(d)
								else:
									yield np.asarray(d), np.asarray(t), np.asarray(valdata), np.asarray(valtruth), endflag
					except IOError:
						print('No file named', filename)
			except IOError:
				print('No truth file for', events[i])
Example 11
def read_lgal_input_fulltrees_withids(folder,lastsnap,file,verbose):
    firstfile = file
    lastfile = file 
    nTrees = 0
    nHalos = 0
    nTreeHalos = numpy.array([],dtype=numpy.int32)
    output_Halos = numpy.array([],dtype=struct_lgalinput)
    output_HaloIDs = numpy.array([],dtype=struct_lgaldbidsinput)
    ifile = file
    filename = folder+'/trees_'+"%03d"%(lastsnap)+'.'+"%d"%(ifile)
    f = open(filename,"rb")
    this_nTrees = numpy.fromfile(f,numpy.int32,1)[0]
    nTrees += this_nTrees
    this_nHalos = numpy.fromfile(f,numpy.int32,1)[0]
    nHalos += this_nHalos
    if(verbose):
        print("File ", ifile, " nHalos = ", this_nHalos)
    nTreeHalos = numpy.fromfile(f,numpy.int32,this_nTrees)
    output_Halos = numpy.fromfile(f,struct_lgalinput,this_nHalos)
    f.close()
    filename = folder+'/tree_dbids_'+"%03d"%(lastsnap)+'.'+"%d"%(ifile)
    f = open(filename,"rb")
    output_HaloIDs = numpy.fromfile(f,struct_lgaldbidsinput,this_nHalos)
    f.close()
    return (nTrees,nHalos,nTreeHalos,output_Halos,output_HaloIDs)
Example 12
    def __parse_data(self):
        print('Reading', self._filepath)
        f = open(self._filepath, 'rb')

        samp_per_segment = 64
        bytes_per_sample = 2
        channels = 2
        tcd_dtype= 'int16'
        f_size = os.path.getsize(self._filepath)
        segments = f_size // ( samp_per_segment * bytes_per_sample * channels )  # integer division so range() below gets an int
        self._progress_bar.setMinimum(0)
        self._progress_bar.setMaximum(segments)
        self._value = 0
        self._progress_bar.setValue(self._value)
        chan1 = numpy.array([], dtype=tcd_dtype)
        chan2 = numpy.array([], dtype=tcd_dtype)
        data  = numpy.zeros((samp_per_segment), dtype=tcd_dtype)
        for seg in range(segments):
            self._value = self._value + 1
            self._progress_bar.setValue(self._value)
            data = numpy.fromfile(f, dtype=tcd_dtype, count=samp_per_segment)
            chan1 = numpy.concatenate((chan1, data.copy()) )
            data = numpy.fromfile(f, dtype=tcd_dtype, count=samp_per_segment)
            chan2 = numpy.concatenate((chan2, data.copy()) )

        f.close()

        chan1 = chan1.astype(float) / 2.0**11 * self._prf/2.0 *154000.0 / self._doppler_freq_1/10**3
        chan2 = chan2.astype(float) / 2.0**11 * self._prf/2.0 *154000.0 / self._doppler_freq_2/ 10**3

        self.chan1 = chan1
        self.chan2 = chan2
Example 13
def openDATfile(filename,ftype,srate=25000):
    fh = open(filename,'rb')
    fh.seek(0)
    if ftype == 'amp':
        data = np.fromfile(fh, dtype=np.int16)
        fh.close()
        data = np.double(data)
        data *= 0.195 # according the Intan, the output should be multiplied by 0.195 to be converted to micro-volts
    elif ftype == 'adc':
        data = np.fromfile(fh, dtype=np.uint16)
        fh.close()
        data = np.double(data)
        data *= 0.000050354 # Intan board ADC inputs: 0.000050354 volts per bit
        data -= np.mean(data)
    
    elif ftype == 'aux':
        data = np.fromfile(fh, dtype=np.uint16)
        fh.close()
        data = np.double(data)
        data *= 0.0000748 # Intan auxiliary inputs: 0.0000748 volts per bit
        
    elif ftype == 'time':
        data = np.fromfile(fh, dtype=np.int32)
        fh.close()
        data = np.double(data)
        data /= srate # convert int32 sample indices to seconds
    return data
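A hedged usage sketch for openDATfile; the file names are assumptions in the style of Intan's naming, not paths from the original source.

# Hypothetical usage; file names are assumptions.
amp = openDATfile('amp-A-000.dat', 'amp', srate=25000)  # scaled to microvolts
t = openDATfile('time.dat', 'time', srate=25000)        # scaled to seconds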
Example 14
def load_matrix_csr(path, verbose=False):
    t_start = time.time()
    data = np.fromfile(
        open(
            os.path.join(
                path,
                "bigrams.data.bin")),
        dtype=np.float32)
    col_ind = np.fromfile(
        open(
            os.path.join(
                path,
                "bigrams.col_ind.bin")),
        dtype=np.int64)
    row_ptr = np.fromfile(
        open(
            os.path.join(
                path,
                "bigrams.row_ptr.bin")),
        dtype=np.int64)
    dim = row_ptr.shape[0] - 1
    cooccurrence = scipy.sparse.csr_matrix(
        (data, col_ind, row_ptr), shape=(
            dim, dim), dtype=np.float32)
    t_end = time.time()
    if verbose:
        print("Matrix loaded in {0:0.2f} sec".format(t_end - t_start))
        print_stats(cooccurrence)
    return cooccurrence
Example 15
	def read_from_file(self, filename):
		'''
		Read data from file. Sets the instance variables
		self.raw_velocity and self.kmsrho8
		
		Parameters:
			* filename (string): the file to read from.
		Returns:
			Nothing
		'''
		print_msg('Reading velocity file: %s...' % filename)
		self.filename = filename

		#Read raw data from velocity file
		f = open(filename, 'rb')
		temp_mesh = np.fromfile(f, count=3, dtype='int32')
		self.mesh_x, self.mesh_y, self.mesh_z = temp_mesh
		self.raw_velocity = np.fromfile(f, dtype='float32').astype('float64')
		f.close()
		self.raw_velocity = self.raw_velocity.reshape((3, self.mesh_x, self.mesh_y, self.mesh_z), order='F')

		#Store the redshift from the filename
		try:
			import os.path
			name = os.path.split(filename)[1]
			self.z = float(name.split('v_')[0])
		except Exception:
			print_msg('Could not determine redshift from file name')
			self.z = -1

		#Convert to kms/s*(rho/8)
		self.kmsrho8 = self.raw_velocity*conv.velconvert(z = self.z)


		print_msg('...done')
Example 16
def read(digits, dataset = "training", path = "."):
    """
    Python function for importing the MNIST data set.
    """
    if dataset == "training":
        fname_img = os.path.join(path, 'MNIST_data/train-images-idx3-ubyte')
        fname_lbl = os.path.join(path, 'MNIST_data/train-labels-idx1-ubyte')
    elif dataset == "testing":
        fname_img = os.path.join(path, 'MNIST_data/t10k-images-idx3-ubyte')
        fname_lbl = os.path.join(path, 'MNIST_data/t10k-labels-idx1-ubyte')
    else:
        raise ValueError("dataset must be 'testing' or 'training'")

    flbl = open(fname_lbl, 'rb')
    magic_nr, size = struct.unpack(">II", flbl.read(8))
    lbl = np.fromfile(flbl, dtype=np.int8)
    flbl.close()

    fimg = open(fname_img, 'rb')
    magic_nr, size, rows, cols = struct.unpack(">IIII", fimg.read(16))
    img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)
    fimg.close()

    ind = [ k for k in range(size) if lbl[k] in digits ]
    images =  np.zeros((len(ind),rows,cols))
    labels = np.zeros((len(ind),1))
    for i in range(len(ind)):
        images[i, :] = img[ ind[i],:,:]
        labels[i] = lbl[ind[i]]

    return images, labels
Example 17
    def test_masked_gauss(self):
        data = numpy.ones((50, 10))
        data[:, 5:] = 2
        lons = numpy.fromfunction(lambda y, x: 3 + x, (50, 10))
        lats = numpy.fromfunction(lambda y, x: 75 - y, (50, 10))
        swath_def = geometry.SwathDefinition(lons=lons, lats=lats)
        mask = numpy.ones((50, 10))
        mask[:, :5] = 0
        masked_data = numpy.ma.array(data, mask=mask)
        res = kd_tree.resample_gauss(swath_def, masked_data.ravel(),
                                     self.area_def, 50000, 25000, segments=1)
        expected_mask = numpy.fromfile(os.path.join(os.path.dirname(__file__),
                                                    'test_files',
                                                    'mask_test_mask.dat'),
                                       sep=' ').reshape((800, 800))
        expected_data = numpy.fromfile(os.path.join(os.path.dirname(__file__),
                                                    'test_files',
                                                    'mask_test_data.dat'),
                                       sep=' ').reshape((800, 800))
        expected = expected_data.sum()
        cross_sum = res.data.sum()

        self.assertTrue(numpy.array_equal(expected_mask, res.mask),
                        msg='Gauss resampling of swath mask failed')
        self.assertAlmostEqual(cross_sum, expected, places=3,
                               msg='Gauss resampling of swath masked data failed')
Example 18
def _array_from_file(infile, dtype, count, sep):
    """Create a numpy array from a file or a file-like object."""

    if isfile(infile):

        global CHUNKED_FROMFILE
        if CHUNKED_FROMFILE is None:
            if (sys.platform == 'darwin' and
                    LooseVersion(platform.mac_ver()[0]) < LooseVersion('10.9')):
                CHUNKED_FROMFILE = True
            else:
                CHUNKED_FROMFILE = False

        if CHUNKED_FROMFILE:
            chunk_size = int(1024 ** 3 / dtype.itemsize)  # 1Gb to be safe
            if count < chunk_size:
                return np.fromfile(infile, dtype=dtype, count=count, sep=sep)
            else:
                array = np.empty(count, dtype=dtype)
                for beg in range(0, count, chunk_size):
                    end = min(count, beg + chunk_size)
                    array[beg:end] = np.fromfile(infile, dtype=dtype, count=end - beg, sep=sep)
                return array
        else:
            return np.fromfile(infile, dtype=dtype, count=count, sep=sep)
    else:
        # treat as file-like object with "read" method; this includes gzip file
        # objects, because numpy.fromfile just reads the compressed bytes from
        # their underlying file object, instead of the decompressed bytes
        read_size = np.dtype(dtype).itemsize * count
        s = infile.read(read_size)
        return np.fromstring(s, dtype=dtype, count=count, sep=sep)
Example 19
  def _RunWebRtcApmVad(self, wav_file_path):
    # Create temporary output path.
    tmp_path = tempfile.mkdtemp()
    output_file_path_probs = os.path.join(
        tmp_path, os.path.split(wav_file_path)[1] + '_vad_probs.tmp')
    output_file_path_rms = os.path.join(
        tmp_path, os.path.split(wav_file_path)[1] + '_vad_rms.tmp')

    # Call WebRTC VAD.
    try:
      subprocess.call([
          self._VAD_WEBRTC_APM_PATH,
          '-i', wav_file_path,
          '-o_probs', output_file_path_probs,
          '-o_rms', output_file_path_rms
      ], cwd=self._VAD_WEBRTC_PATH)

      # Parse annotations.
      self._apm_vad_probs = np.fromfile(output_file_path_probs, np.double)
      self._apm_vad_rms = np.fromfile(output_file_path_rms, np.double)
      assert len(self._apm_vad_rms) == len(self._apm_vad_probs)

    except Exception as e:
      logging.error('Error while running the WebRTC APM VAD (' +
                    str(e) + ')')
    finally:
      if os.path.exists(tmp_path):
        shutil.rmtree(tmp_path)
Example 20
 def parseDataFile(self, uLong, precision, encoding, dataFile):
   assert uLong in ['uint32', 'uint64']
   assert precision in ['single', 'double']
   assert encoding in ['BigEndian', 'LittleEndian']
   
   require_numpy()
   
   fd = open(dataFile, 'rb')
   
   byteorder = {'LittleEndian': '<', 'BigEndian': '>'}[encoding]
   unsignedLongTypeString = {'uint32': 'u4', 'uint64': 'u8'}[uLong]
   realTypeString = {'single': 'f4', 'double': 'f8'}[precision]
   
   ulongDType = numpy.dtype(byteorder + unsignedLongTypeString)
   floatDType = numpy.dtype(byteorder + realTypeString)
   
   independentGeometry = []
   
   for independentVariable in self.independentVariables:
     size = int(numpy.fromfile(fd, dtype=ulongDType, count=1)[0])
     independentGeometry.append(size)
     assert size == independentVariable['length']
     a = numpy.fromfile(fd, dtype=floatDType, count=size)
     independentVariable['array'] = a
   
   if len(independentGeometry) == 0:
     independentGeometry.append(1)
   
   for dependentVariable in self.dependentVariables:
     size = int(numpy.fromfile(fd, dtype=ulongDType, count=1)[0])
     a = numpy.fromfile(fd, dtype=floatDType, count=size)
     assert a.size == size, "Data file %s has incorrect size. Variable '%s' wasn't written completely." % (dataFile, dependentVariable['name'])
     dependentVariable['array'] = a.reshape(*independentGeometry)
Example 21
    def read_record(self, dtype='b1'):
        """
        Read and return a record of numpy type dtype from the current file.

        If the record is a single value, it is returned.
        Otherwise, a numpy.ndarray is returned.

        dtype is the data type to read (Python type or numpy dtype or string
        identifier).
        """
        if self._mode != 'r' and self._mode != 'rb':
            raise FortranIOException('Not in read mode')

        dtype = np.dtype(dtype)

        nbytes = self._read_control()
        nitems = nbytes // dtype.itemsize
        if nbytes % dtype.itemsize != 0:
            raise FortranIOException('Record size not valid for data type')

        if nitems > 1:
            data = np.fromfile(self._file, dtype, nitems)
        else:
            data = np.fromfile(self._file, dtype, nitems)[0]

        nbytes2 = self._read_control()
        if nbytes != nbytes2:
            raise FortranIOException('Record head and tail mismatch')

        return data
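To make the head/tail bookkeeping in read_record concrete, here is a minimal sketch that writes a single Fortran-style unformatted record (a 4-byte byte count, the raw payload, then the byte count repeated), which is the layout the _read_control calls above appear to parse; the file name and the little-endian markers are assumptions.

import struct
import numpy as np

def write_fortran_record(fname, arr):
    # Sketch only: little-endian 4-byte record markers around the raw payload.
    payload = np.ascontiguousarray(arr).tobytes()
    with open(fname, 'wb') as f:
        f.write(struct.pack('<i', len(payload)))  # record head
        f.write(payload)
        f.write(struct.pack('<i', len(payload)))  # record tail, must match head

write_fortran_record('record.bin', np.arange(5, dtype=np.float64))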
Example 22
def load_ply(fn):
    f = open(fn, 'rb')
    prop_to_dtype = { 'float' : np.float32, 'int' : np.int32, 'uchar' : np.uint8 }

    header = []
    while True:
        s = f.readline().decode('ascii').split()  # decode bytes: the file is opened in binary mode
        header.append(s)
        if s[0] == 'end_header':
            break

    it = iter(header)
    s = next(it)
    elements = {}
    while True:
        if s[0] == 'end_header':
            break
        if s[0] == 'element':
            el_name, el_len = s[1], int(s[2])
            el_props = []
            s = next(it)
            while s[0] == 'property':
                el_props.append( s )
                s = next(it)
            if el_name == 'face':
                el_type = np.dtype( [('count', np.uint8), ('idx', np.int32, 3)] )
                elements[el_name] = np.fromfile(f, el_type, el_len)['idx'].copy()
            else:
                el_type = np.dtype( [(name, np.dtype(prop_to_dtype[tp])) for _, tp, name in el_props] )
                elements[el_name] = np.fromfile(f, el_type, el_len)
            continue
        s = next(it)
    return elements
Example 23
    def __init__(self, filename, verbose = False):
        super(SpeReader, self).__init__(filename, verbose = verbose)

        # open the file & read the header
        self.header_size = 4100
        self.fileptr = open(filename, "rb")

        self.fileptr.seek(42)
        self.image_width = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
        self.fileptr.seek(656)
        self.image_height = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
        self.fileptr.seek(1446)
        self.number_frames = int(numpy.fromfile(self.fileptr, numpy.uint32, 1)[0])

        self.fileptr.seek(108)
        image_mode = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
        if (image_mode == 0):
            self.image_size = 4 * self.image_width * self.image_height
            self.image_mode = numpy.float32
        elif (image_mode == 1):
            self.image_size = 4 * self.image_width * self.image_height
            self.image_mode = numpy.uint32
        elif (image_mode == 2):
            self.image_size = 2 * self.image_width * self.image_height
            self.image_mode = numpy.int16
        elif (image_mode == 3):
            self.image_size = 2 * self.image_width * self.image_height
            self.image_mode = numpy.uint16
        else:
            print("unrecognized spe image format: ", image_mode)
Example 24
def parseData(dataset='testing', path='.'):
	'''
	parseData - Parses a file into matrices
	Input - the name of file to be parsed
	Output - The data in matrix representation

	'''

	if dataset == 'training':
		image_file = os.path.join(path, 'train-images-idx3-ubyte')
		label_file = os.path.join(path, 'train-labels-idx1-ubyte')
	elif dataset == 'testing':
		image_file = os.path.join(path, 't10k-images-idx3-ubyte')
		label_file = os.path.join(path, 't10k-labels-idx1-ubyte')
	else:
		raise ValueError("'dataset' must be 'testing' or 'training'")

	# get the matrix for image data
	f_img = open(image_file, 'rb')
	magic_nr, size = struct.unpack(">II", f_img.read(8))  # parse the magic number, & size of dataset
	dim_x, dim_y = struct.unpack(">II", f_img.read(8))  # get the dimensions of each handwritten num
	X = np.fromfile(f_img, dtype=np.dtype('B'))
	X = X.reshape(size, dim_x * dim_y)


	# get the matrix for label data
	f_lbl = open(label_file, 'rb')
	magic_nr, size = struct.unpack(">II", f_lbl.read(8)) # only magic # and size of dataset
	y = np.fromfile(f_lbl, dtype=np.dtype('B'))
	#X[X > 1] = 1
	return X, y
Example 25
    def __init__(self, filename, xml):
        DataReader.__init__(self, filename, xml)
        
        # Open the file & read the header.
        self.header_size = 4100
        self.fileptr = open(filename, "rb")

        # FIXME: Should check that these match the XML file.        
        self.fileptr.seek(42)
        self.image_width = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
        self.fileptr.seek(656)
        self.image_height = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
        self.fileptr.seek(1446)
        self.number_frames = int(numpy.fromfile(self.fileptr, numpy.uint32, 1)[0])

        self.fileptr.seek(108)
        image_mode = int(numpy.fromfile(self.fileptr, numpy.uint16, 1)[0])
        if (image_mode == 0):
            self.image_size = 4 * self.image_width * self.image_height
            self.image_mode = numpy.float32
        elif (image_mode == 1):
            self.image_size = 4 * self.image_width * self.image_height
            self.image_mode = numpy.uint32
        elif (image_mode == 2):
            self.image_size = 2 * self.image_width * self.image_height
            self.image_mode = numpy.int16
        elif (image_mode == 3):
            self.image_size = 2 * self.image_width * self.image_height
            self.image_mode = numpy.uint16
        else:
            print("unrecognized spe image format: ", image_mode)
Example 26
def getPressureData(fullFileName):
    itemsize = os.path.getsize(fullFileName) // 8 // 2  # number of float64 values per channel
    fp = open(fullFileName, 'rb')
    time = np.fromfile(fp, dtype=np.float64, count=itemsize)
    pressure = np.fromfile(fp, dtype=np.float64, count=itemsize)
    fp.close()
    return (time, pressure)
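A matching writer sketch for the layout getPressureData expects (all time samples as float64, then all pressure samples); the function name is an assumption.

import numpy as np

def writePressureData(fullFileName, time, pressure):
    # Hypothetical counterpart to getPressureData: two contiguous float64 blocks.
    with open(fullFileName, 'wb') as fp:
        np.asarray(time, dtype=np.float64).tofile(fp)
        np.asarray(pressure, dtype=np.float64).tofile(fp)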
Example 27
def readFlowFile(file_name,flip=False):
    data2D=None
    with open(file_name,'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        if 202021.25 != magic:
            print('Magic number incorrect. Invalid .flo file')
        else:
            w = np.fromfile(f, np.int32, count=1)
            h = np.fromfile(f, np.int32, count=1)
            if w.size==0 or h.size==0:
                # print type(w),type(h),w,h
                data2D=None;
            else:               
                # print (w, h)
                data = np.fromfile(f, np.float32, count=2*w*h)
                # Reshape data into 3D array (columns, rows, bands)
                # if flip is True:
                #     data2D = np.resize(data, (w, h, 2))
                #     data2D = data2D.
                #     data2D = np.reshape(data, (h, w, 2))
                #     # ,order='F')
                # else:
                data2D = np.reshape(data, (h, w, 2))
                # print data2D.shape
    return data2D
Example 28
def sorted_indexes(filename,data_type=np.uint32):

    if os.path.exists(filename):
        curfilename = filename
    elif os.path.exists(filename+".0"):
        curfilename = filename+".0"
    else:
        print("file not found:", filename)
        sys.exit()

    f=open(curfilename,'rb')
    number_of_files=np.fromfile(f,dtype=np.uint32,count=1)[0]
    dims=np.fromfile(f,dtype=np.uint32,count=1)[0]
    dims3=dims**3
    total_size=np.fromfile(f,dtype=data_type,count=dims3)
    total_array=[]
    for j in range(dims3):
        total_array.append(np.empty(total_size[j],dtype=data_type))
    f.close()
    total_array=np.array(total_array,dtype=object)  # ragged: per-cell arrays differ in length

    offset=np.zeros(dims3,dtype=data_type)
    for i in range(number_of_files):
        curfilename=filename+'.'+str(i)
        f=open(curfilename,'rb')
        f.seek(4*(2+dims3),os.SEEK_CUR)
        for j in range(dims3):
            size=np.fromfile(f,dtype=data_type,count=1)[0]
            array=np.fromfile(f,dtype=data_type,count=size)
            total_array[j][offset[j]:offset[j]+size]=array
            offset[j]+=size
        f.close()

    return total_array
Example 29
def readFLO(path):
    f = open(path, 'rb')
    
    # Read magic number ("PIEH" in ASCII = float 202021.25)
    magic = np.fromfile(f, np.float32, count=1)

    if magic != 202021.25:
        raise Exception('Invalid .flo file') 

    # Read width
    f.seek(4)
    w = int(np.fromfile(f, np.int32, count=1)[0])

    # Read height
    f.seek(8)
    h = int(np.fromfile(f, np.int32, count=1)[0])

    # Read (u,v) coordinates
    f.seek(12)
    data = np.fromfile(f, np.float32, count=w*h*2)

    # Close file (.flo)
    f.close()

    # Reshape data into 3D array (columns, rows, bands)
    dataM = np.resize(data, (h, w, 2))

    # Extract u and v coordinates
    u = dataM[:,:,0]
    v = dataM[:,:,1]
    
    return w,h,u,v
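A complementary sketch that writes the Middlebury .flo layout read above (float32 magic 202021.25, int32 width and height, then row-major interleaved (u, v) float32 pairs); the function name is an assumption.

import numpy as np

def writeFLO(path, u, v):
    # Hypothetical writer; u and v are (h, w) float arrays.
    h, w = u.shape
    with open(path, 'wb') as f:
        np.array([202021.25], dtype=np.float32).tofile(f)  # magic "PIEH"
        np.array([w, h], dtype=np.int32).tofile(f)
        np.stack((u, v), axis=-1).astype(np.float32).tofile(f)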
Example 30
def read(dataset = "training", path = "."):
    """
    Python function for importing the MNIST data set.  It returns an iterator
    of 2-tuples with the first element being the label and the second element
    being a numpy.uint8 2D array of pixel data for the given image.
    """

    if dataset == "training":
        fname_img = os.path.join(path, 'train-images-idx3-ubyte')
        fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')
    elif dataset == "testing":
        fname_img = os.path.join(path, 't10k-images-idx3-ubyte')
        fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')
    else:
        raise ValueError("dataset must be 'testing' or 'training'")

    # Load everything in some numpy arrays
    with open(fname_lbl, 'rb') as flbl:
        magic, num = struct.unpack(">II", flbl.read(8))
        lbl = np.fromfile(flbl, dtype=np.int8)

    with open(fname_img, 'rb') as fimg:
        magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
        img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)

    get_img = lambda idx: (lbl[idx], img[idx])

    # Create an iterator which returns each image in turn
    for i in range(len(lbl)):
        yield get_img(i)
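A short usage sketch for the generator above; path '.' assumes the ubyte files sit in the working directory.

# Hypothetical usage; prints the first label and image shape, then stops.
for label, pixels in read(dataset="training", path="."):
    print(label, pixels.shape)  # e.g. 5 (28, 28)
    break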
Example 31
 def my_fread(*x, **y):
     return np.fromfile(*x, **y)[0]
Example 32
def read_geometry(filepath, read_metadata=False, read_stamp=False):
    """Read a triangular format Freesurfer surface mesh.

    Parameters
    ----------
    filepath : str
        Path to surface file.
    read_metadata : bool, optional
        If True, read and return metadata as key-value pairs.

        Valid keys:

        * 'head' : array of int
        * 'valid' : str
        * 'filename' : str
        * 'volume' : array of int, shape (3,)
        * 'voxelsize' : array of float, shape (3,)
        * 'xras' : array of float, shape (3,)
        * 'yras' : array of float, shape (3,)
        * 'zras' : array of float, shape (3,)
        * 'cras' : array of float, shape (3,)

    read_stamp : bool, optional
        Return the comment from the file

    Returns
    -------
    coords : numpy array
        nvtx x 3 array of vertex (x, y, z) coordinates.
    faces : numpy array
        nfaces x 3 array defining mesh triangles.
    volume_info : OrderedDict
        Returned only if `read_metadata` is True.  Key-value pairs found in the
        geometry file.
    create_stamp : str
        Returned only if `read_stamp` is True.  The comment added by the
        program that saved the file.
    """
    volume_info = OrderedDict()

    TRIANGLE_MAGIC = 16777214
    QUAD_MAGIC = 16777215
    NEW_QUAD_MAGIC = 16777213
    with open(filepath, "rb") as fobj:
        magic = _fread3(fobj)
        if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC):  # Quad file
            nvert = _fread3(fobj)
            nquad = _fread3(fobj)
            (fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.)
            coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float64) / div
            coords = coords.reshape(-1, 3)
            quads = _fread3_many(fobj, nquad * 4)
            quads = quads.reshape(nquad, 4)
            #
            #   Face splitting follows
            #
            faces = np.zeros((2 * nquad, 3), dtype=int)
            nface = 0
            for quad in quads:
                if (quad[0] % 2) == 0:
                    faces[nface] = quad[0], quad[1], quad[3]
                    nface += 1
                    faces[nface] = quad[2], quad[3], quad[1]
                    nface += 1
                else:
                    faces[nface] = quad[0], quad[1], quad[2]
                    nface += 1
                    faces[nface] = quad[0], quad[2], quad[3]
                    nface += 1

        elif magic == TRIANGLE_MAGIC:  # Triangle file
            create_stamp = fobj.readline().rstrip(b'\n').decode('utf-8')
            fobj.readline()
            vnum = np.fromfile(fobj, ">i4", 1)[0]
            fnum = np.fromfile(fobj, ">i4", 1)[0]
            coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
            faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)

            if read_metadata:
                volume_info = _read_volume_info(fobj)
        else:
            raise ValueError("File does not appear to be a Freesurfer surface")

    coords = coords.astype(np.float64)  # XXX: due to mayavi bug on mac 32bits

    ret = (coords, faces)
    if read_metadata:
        if len(volume_info) == 0:
            warnings.warn('No volume information contained in the file')
        ret += (volume_info, )
    if read_stamp:
        ret += (create_stamp, )

    return ret
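A minimal usage sketch; the file name is an assumption, and the metadata dict is only populated for triangle files that carry the volume-info footer.

# Hypothetical usage; 'lh.white' is an assumption.
coords, faces, volume_info = read_geometry('lh.white', read_metadata=True)
print(coords.shape, faces.shape)  # (nvtx, 3) (nfaces, 3)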
Example 33
def load_velo_scan(file):
    '''Load and parse a velodyne binary file'''
    scan = np.fromfile(file, dtype=np.float32)
    return scan.reshape((-1, 4))
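A usage sketch assuming a KITTI-style velodyne scan of (x, y, z, reflectance) float32 quadruples; the file name is an assumption.

# Hypothetical usage; '000000.bin' is an assumption.
scan = load_velo_scan('000000.bin')
xyz, reflectance = scan[:, :3], scan[:, 3]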
Example 34
def readsu(filename,nx,ny,nz,timeslice):
   ff=open(filename,mode='rb')
   ff.seek(8*timeslice*nx*ny*nz)
   field = np.fromfile(ff,dtype='float64',count=nx*ny*nz).reshape((nx,ny,nz),order='F')
   ff.close()
   return field
Example 35
    def __init__(self,
                 basedir,
                 snapnum,
                 long_ids=False,
                 swap=False,
                 SFR=False,
                 read_IDs=True,
                 prefix='/groups_'):

        if long_ids: format = np.uint64
        else: format = np.uint32

        exts = ('000' + str(snapnum))[-3:]

        #################  READ TAB FILES #################
        fnb, skip, Final = 0, 0, False
        dt1 = np.dtype((np.float32, 3))
        dt2 = np.dtype((np.float32, 6))
        prefix = basedir + prefix + exts + "/group_tab_" + exts + "."
        while not (Final):
            f = open(prefix + str(fnb), 'rb')
            self.Ngroups = np.fromfile(f, dtype=np.int32, count=1)[0]
            self.TotNgroups = np.fromfile(f, dtype=np.int32, count=1)[0]
            self.Nids = np.fromfile(f, dtype=np.int32, count=1)[0]
            self.TotNids = np.fromfile(f, dtype=np.uint64, count=1)[0]
            self.Nfiles = np.fromfile(f, dtype=np.uint32, count=1)[0]

            TNG, NG = self.TotNgroups, self.Ngroups
            if fnb == 0:
                self.GroupLen = np.empty(TNG, dtype=np.int32)
                self.GroupOffset = np.empty(TNG, dtype=np.int32)
                self.GroupMass = np.empty(TNG, dtype=np.float32)
                self.GroupPos = np.empty(TNG, dtype=dt1)
                self.GroupVel = np.empty(TNG, dtype=dt1)
                self.GroupTLen = np.empty(TNG, dtype=dt2)
                self.GroupTMass = np.empty(TNG, dtype=dt2)
                if SFR: self.GroupSFR = np.empty(TNG, dtype=np.float32)

            if NG > 0:
                locs = slice(skip, skip + NG)
                self.GroupLen[locs] = np.fromfile(f, dtype=np.int32, count=NG)
                self.GroupOffset[locs] = np.fromfile(f,
                                                     dtype=np.int32,
                                                     count=NG)
                self.GroupMass[locs] = np.fromfile(f,
                                                   dtype=np.float32,
                                                   count=NG)
                self.GroupPos[locs] = np.fromfile(f, dtype=dt1, count=NG)
                self.GroupVel[locs] = np.fromfile(f, dtype=dt1, count=NG)
                self.GroupTLen[locs] = np.fromfile(f, dtype=dt2, count=NG)
                self.GroupTMass[locs] = np.fromfile(f, dtype=dt2, count=NG)
                if SFR:
                    self.GroupSFR[locs] = np.fromfile(f,
                                                      dtype=np.float32,
                                                      count=NG)
                skip += NG

                if swap:
                    self.GroupLen.byteswap(True)
                    self.GroupOffset.byteswap(True)
                    self.GroupMass.byteswap(True)
                    self.GroupPos.byteswap(True)
                    self.GroupVel.byteswap(True)
                    self.GroupTLen.byteswap(True)
                    self.GroupTMass.byteswap(True)
                    if SFR: self.GroupSFR.byteswap(True)

            curpos = f.tell()
            f.seek(0, os.SEEK_END)
            if curpos != f.tell():
                raise Exception(
                    "Warning: finished reading before EOF for tab file", fnb)
            f.close()
            fnb += 1
            if fnb == self.Nfiles: Final = True

        #################  READ IDS FILES #################
        if read_IDs:

            fnb, skip = 0, 0
            Final = False
            while not (Final):
                fname = basedir + "/groups_" + exts + "/group_ids_" + exts + "." + str(
                    fnb)
                f = open(fname, 'rb')
                Ngroups = np.fromfile(f, dtype=np.uint32, count=1)[0]
                TotNgroups = np.fromfile(f, dtype=np.uint32, count=1)[0]
                Nids = np.fromfile(f, dtype=np.uint32, count=1)[0]
                TotNids = np.fromfile(f, dtype=np.uint64, count=1)[0]
                Nfiles = np.fromfile(f, dtype=np.uint32, count=1)[0]
                Send_offset = np.fromfile(f, dtype=np.uint32, count=1)[0]
                if fnb == 0:
                    self.GroupIDs = np.zeros(dtype=format, shape=TotNids)
                if Ngroups > 0:
                    if long_ids:
                        IDs = np.fromfile(f, dtype=np.uint64, count=Nids)
                    else:
                        IDs = np.fromfile(f, dtype=np.uint32, count=Nids)
                    if swap:
                        IDs = IDs.byteswap(True)
                    self.GroupIDs[skip:skip + Nids] = IDs[:]
                    skip += Nids
                curpos = f.tell()
                f.seek(0, os.SEEK_END)
                if curpos != f.tell():
                    raise Exception(
                        "Warning: finished reading before EOF for IDs file",
                        fnb)
                f.close()
                fnb += 1
                if fnb == Nfiles: Final = True
Example 36
    def get_sensor_data(self, query):
        idx = query
        read_test_image = False
        if isinstance(query, dict):
            assert "lidar" in query
            idx = query["lidar"]["idx"]
            read_test_image = "cam" in query

        info = self._nusc_infos[idx]
        res = {
            "lidar": {
                "type": "lidar",
                "points": None,
            },
            "metadata": {
                "token": info["token"],
                "scene_token": info["scene_token"],
            },
        }
        lidar_path = Path(info['lidar_path'])
        points = np.fromfile(str(lidar_path), dtype=np.float32,
                             count=-1).reshape([-1, 5])
        points[:, 3] /= 255
        points[:, 4] = 0
        sweep_points_list = [points]
        ts = info["timestamp"] / 1e6

        for sweep in info["sweeps"]:
            points_sweep = np.fromfile(str(sweep["lidar_path"]),
                                       dtype=np.float32,
                                       count=-1).reshape([-1, 5])
            sweep_ts = sweep["timestamp"] / 1e6
            points_sweep[:, 3] /= 255
            points_sweep[:, :3] = points_sweep[:, :3] @ sweep[
                "sweep2lidar_rotation"].T
            points_sweep[:, :3] += sweep["sweep2lidar_translation"]
            points_sweep[:, 4] = ts - sweep_ts
            sweep_points_list.append(points_sweep)

        points = np.concatenate(sweep_points_list, axis=0)[:, [0, 1, 2, 4]]

        if read_test_image:
            if Path(info["cam_front_path"]).exists():
                with open(str(info["cam_front_path"]), 'rb') as f:
                    image_str = f.read()
            else:
                image_str = None
            res["cam"] = {
                "type": "camera",
                "data": image_str,
                "datatype": Path(info["cam_front_path"]).suffix[1:],
            }
        res["lidar"]["points"] = points
        if 'gt_boxes' in info:
            mask = info["num_lidar_pts"] > 0
            gt_boxes = info["gt_boxes"][mask]
            if self._with_velocity:
                gt_velocity = info["gt_velocity"][mask]
                nan_mask = np.isnan(gt_velocity[:, 0])
                gt_velocity[nan_mask] = [0.0, 0.0]
                gt_boxes = np.concatenate([gt_boxes, gt_velocity], axis=-1)
            res["lidar"]["annotations"] = {
                'boxes': gt_boxes,
                'names': info["gt_names"][mask],
            }
        return res
Example 37
rate_drop_lstm = 0.15 + np.random.rand() * 0.25
rate_drop_dense = 0.15 + np.random.rand() * 0.25

act = 'relu'
re_weight = False  # whether to re-weight classes to fit the 17.5% share in test set
# NOT FOR THE PROJECT BECAUSE NOT USING TEST

STAMP = 'lstm_%d_%d_%.2f_%.2f'%(num_lstm, num_dense, rate_drop_lstm, \
        rate_drop_dense)

########################################
## uploads files
########################################

# upload premade embedding
embedded = np.fromfile(EMBEDDING_FILE, dtype=EMBEDDED_TYPE).reshape(
    (COLUMNS_PREPROCESS, -1)).transpose()
# store pair id and duplicate
pair_id = list(map(int, embedded[:, -1]))
question_id = list(map(int, embedded[:, -2]))
labels = np.array(list(map(int, embedded[:, -3])))

# removes unnecessary column from embedding
embedded = embedded[:, :-3]

data_1 = np.fromfile(Q1_FILE, dtype=EMBEDDED_TYPE).reshape(
    (-1, MAX_SEQUENCE_LENGTH))
data_2 = np.fromfile(Q2_FILE, dtype=EMBEDDED_TYPE).reshape(
    (-1, MAX_SEQUENCE_LENGTH))

########################################
## sample train/validation data
Example 38
print('Reading data in ' + float_type + ' format from ' + infile)

indata.read(1)  # the '_' symbol which starts VTK appended data

bytecount = struct.unpack('i', indata.read(4))[0]

#define the expected structure

if 'little' in endianness.lower():
    float_format = '<'
elif 'big' in endianness.lower():
    float_format = '>'
else:
    raise Exception("Didn't understand endianness of input: " + endianness)

if '32' in float_type:
    float_format += 'f'
    num_count = bytecount // 4
elif '64' in float_type:
    float_format += 'd'
    num_count = bytecount // 8
else:
    raise Exception("Didn't understand float format for points: " + float_type)

np_data = np.fromfile(indata, dtype=np.dtype(float_format),
                      count=num_count).reshape((-1, 3))

indata.close()

np.savetxt(outfile, np_data, fmt='%16.6f')
Example 39
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy import signal
import librosa
import dft_freq_est
import attack_finder
from common import normalize

def cat_to_cols(x,y):
    return np.concatenate((x[:,None],y[:,None]),axis=1)

do_plot=False

sr=16000
x=np.fromfile('/tmp/snd.f64')
x=normalize(x)

N_W=1024
N_FFT=2048
N_H=2048
n_max=((len(x)-N_W)//N_H)*N_H

peak_thresh=-140

# intialize the sinusoidal analyser
pa=dft_freq_est.peak_analyzer(
    N_FFT,
    N_W,
    N_H)
Example 40
    def __call__(self, results):
        """Call functions to load image and get image meta information.

        Args:
            results (dict): Result dict from :obj:`mmseg.CustomDataset`.

        Returns:
            dict: The dict contains loaded image and meta information.
        """

        if results.get('img_prefix') is not None:
            filename = osp.join(results['img_prefix'],
                                results['img_info']['filename'])
        else:
            filename = results['img_info']['filename']
        hdr = dict()
        with open(filename) as f:
            for line in f.readlines():
                if '=' not in line:
                    continue
                else:
                    key, value = line.split('=')
                    key = key.strip()
                    value = value.strip()
                    hdr[key] = value
        assert hdr['file type'] == 'ENVI Standard', \
            'Require ENVI data: file type = ENVI Standard'
        assert hdr['byte order'] == '0', 'Require ENVI data: byte order = 0'
        assert hdr['x start'] == '0', 'Require ENVI data: x start = 0'
        assert hdr['y start'] == '0', 'Require ENVI data: y start = 0'
        assert hdr['interleave'].lower() == 'bsq', \
            'Require ENVI data: interleave = bsq'
        assert int(hdr['data type']) <= len(self.ENVI_data_type) \
            and self.ENVI_data_type[int(hdr['data type'])] is not None

        data_type = int(hdr['data type'])
        header_offset = int(hdr['header offset'])
        height = int(hdr['lines'])
        width = int(hdr['samples'])
        bands = int(hdr['bands'])
        if hdr['interleave'].lower() == 'bsq':
            img_bytes = np.fromfile(filename.replace('.hdr', '.raw'),
                                    dtype=self.ENVI_data_type[data_type],
                                    offset=header_offset)
            img_bytes = img_bytes.reshape((bands, height, width))
            img_bytes = img_bytes[self.channel_select, :, :]
            if self.dataset_name == 'cholangiocarcinoma':
                img_bytes = img_bytes[:, ::-1, :]
            img_bytes = np.transpose(img_bytes, (1, 2, 0))
        else:
            img_bytes = np.zeros((height, width, bands),
                                 dtype=self.ENVI_data_type[data_type])
            pass
        if self.to_float32:
            img_bytes = img_bytes.astype(np.float32)
            if self.normalization:

                img_bytes -= self.mean[..., self.channel_select]
                img_bytes /= self.std[..., self.channel_select]
                #############################################
                # img_bytes *= 16
                # img_bytes += 128
                # img_bytes = img_bytes.astype(np.uint8)
                # img_bytes = img_bytes.astype(np.float32)
                # img_bytes -= 128
                # img_bytes /= 16
                ##############################################
        if self.median_blur:
            # bands are the last axis after the transpose above
            for band in range(img_bytes.shape[2]):
                img_bytes[:, :, band] = cv2.medianBlur(img_bytes[:, :, band],
                                                       ksize=3)

        results['filename'] = filename.replace('.hdr', '.png')
        results['ori_filename'] = results['img_info']['filename'].replace(
            '.hdr', '.png')
        results['img'] = img_bytes
        results['img_shape'] = img_bytes.shape
        results['ori_shape'] = img_bytes.shape
        # Set initial values for default meta_keys
        results['pad_shape'] = img_bytes.shape
        results['scale_factor'] = 1.0
        results['channel_select'] = self.channel_select
        results['channel_to_show'] = self.channel_to_show
        num_channels = 1 if len(img_bytes.shape) < 3 else img_bytes.shape[2]
        mean = np.ones(num_channels, dtype=np.float32) * 128
        std = np.ones(num_channels, dtype=np.float32) * 16
        results['img_norm_cfg'] = dict(mean=mean, std=std, to_rgb=False)
        return results
Example 41
    def _is_valid(cls, filename, *args, **kwargs):
        """
        Defined for the NMSU file naming scheme.
        This could differ for other formats.
        """
        f = str(filename)
        prefix, suffix = filename_pattern["particle_data"]
        if not os.path.isfile(f):
            return False
        if not f.endswith(suffix):
            return False
        if "s0" not in f:
            # ATOMIC.DAT, for instance, passes the other tests, but then dies
            # during _find_files because it can't be split.
            return False
        with open(f, "rb") as fh:
            try:
                amr_prefix, amr_suffix = filename_pattern["amr"]
                possibles = glob.glob(os.path.dirname(os.path.abspath(f)) + "/*")
                for possible in possibles:
                    if possible.endswith(amr_suffix):
                        if os.path.basename(possible).startswith(amr_prefix):
                            return False
            except Exception:
                pass
            try:
                seek = 4
                fh.seek(seek)
                headerstr = np.fromfile(fh, count=1, dtype=(str, 45))  # NOQA
                aexpn = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                aexp0 = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                amplt = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                astep = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                istep = np.fromfile(fh, count=1, dtype=">i4")  # NOQA
                partw = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                tintg = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                ekin = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                ekin1 = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                ekin2 = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                au0 = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                aeu0 = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                nrowc = np.fromfile(fh, count=1, dtype=">i4")  # NOQA
                ngridc = np.fromfile(fh, count=1, dtype=">i4")  # NOQA
                nspecs = np.fromfile(fh, count=1, dtype=">i4")  # NOQA
                nseed = np.fromfile(fh, count=1, dtype=">i4")  # NOQA
                Om0 = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                Oml0 = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                hubble = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                Wp5 = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                Ocurv = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                wspecies = np.fromfile(fh, count=10, dtype=">f4")  # NOQA
                lspecies = np.fromfile(fh, count=10, dtype=">i4")  # NOQA
                extras = np.fromfile(fh, count=79, dtype=">f4")  # NOQA
                boxsize = np.fromfile(fh, count=1, dtype=">f4")  # NOQA
                return True
            except Exception:
                return False
        return False
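
A self-contained sketch of the sniffing pattern used above, with an invented file layout: read a few big-endian fields from a fixed offset and treat any failure or short read as "not my format".

import tempfile

import numpy as np

def looks_like_header(fname):
    # Skip a 4-byte record marker, then try to read a 45-byte header
    # string and one big-endian float; a short read means "not this format".
    try:
        with open(fname, "rb") as fh:
            fh.seek(4)
            headerstr = np.fromfile(fh, count=1, dtype="S45")
            aexpn = np.fromfile(fh, count=1, dtype=">f4")
            return headerstr.size == 1 and aexpn.size == 1
    except Exception:
        return False

# Round trip with a synthetic file matching the invented layout.
with tempfile.NamedTemporaryFile(suffix=".DAT", delete=False) as tmp:
    tmp.write(b"\x00" * 4)                              # fake record marker
    tmp.write(b"synthetic header".ljust(45))            # 45-byte header string
    tmp.write(np.array([1.0], dtype=">f4").tobytes())   # big-endian aexpn
print(looks_like_header(tmp.name))  # True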
Example n. 42
0
ref_seq = numpy.zeros((128, 128, 20))  # stack of the 20 reference frames
ref_sqrt = numpy.zeros((128, 128))
bg_seq = numpy.zeros((128, 128, positions))

#call .exe file to do imaging
#subprocess.call([r"qcl-holo-cap.exe", data_dir, '--zstep', str(zstep), '--positions', str(positions), '--frames', str(num_frames), '--laserpower', str(lp), '--inte', str(inte)])

#read in reference image
re_dir = r'C:\Users\shihao\Desktop\ir-images\ir-holography\bead5\ref'
re_list = os.listdir(re_dir)
number_res = len(re_list)
if( number_res != 20):
    print("Error: Reference Image is missing!")
else:
    for i in range(number_res):
        ref_dir = re_dir + '\\' + re_list[i]
        ref_data = numpy.fromfile(ref_dir, dtype = numpy.uint16, count = -1, sep = '')
        ref_data = numpy.reshape(ref_data, (128, 128), order = 'C')
        ref_seq[:, :, i] = ref_data
        
    ref_intensity = numpy.sum(ref_seq, axis=2) / positions
    ref_sqrt = numpy.sqrt(ref_intensity)
    
#read in background image
bgs_dir = r'C:\Users\shihao\Desktop\ir-images\ir-holography\bead5\bg_holo'
bgs_list = os.listdir(bgs_dir)
number_bgs = len(bgs_list)
if( number_bgs != positions):
    print("Error: Background Image is missing!")
else:
    for i in range(number_bgs):
        bg_dir = bgs_dir + '\\' + bgs_list[i]
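
The background loop is truncated in the source, but the per-frame read it performs is the same fromfile-reshape-stack pattern as the reference loop. A self-contained miniature of that pattern, using synthetic frames:

import os
import tempfile

import numpy

# Write two synthetic 128x128 uint16 frames, then read them back exactly
# as the loops above do: fromfile + reshape, stacked along the third axis.
tmpdir = tempfile.mkdtemp()
for i in range(2):
    frame = numpy.full((128, 128), i, dtype=numpy.uint16)
    frame.tofile(os.path.join(tmpdir, 'frame%02d.bin' % i))

seq = numpy.zeros((128, 128, 2))
for i, name in enumerate(sorted(os.listdir(tmpdir))):
    data = numpy.fromfile(os.path.join(tmpdir, name), dtype=numpy.uint16)
    seq[:, :, i] = numpy.reshape(data, (128, 128), order='C')
print(seq.shape)  # (128, 128, 2)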
Example n. 43
0
    def _parse_parameter_file(self):
        """
        Get the various simulation parameters & constants.
        """
        self.domain_left_edge = np.zeros(3, dtype="float")
        self.domain_right_edge = np.zeros(3, dtype="float") + 1.0
        self.dimensionality = 3
        self.refine_by = 2
        self._periodicity = (True, True, True)
        self.cosmological_simulation = True
        self.parameters = {}
        self.parameters.update(constants)
        self.parameters["Time"] = 1.0
        # read the amr header
        with open(self._file_amr, "rb") as f:
            amr_header_vals = fpu.read_attrs(f, amr_header_struct, ">")
            n_to_skip = len(("tl", "dtl", "tlold", "dtlold", "iSO"))
            fpu.skip(f, n_to_skip, endian=">")
            (self.ncell) = fpu.read_vector(f, "i", ">")[0]
            # Try to figure out the root grid dimensions
            est = int(np.rint(self.ncell ** (1.0 / 3.0)))
            # Note here: this is the number of *cells* on the root grid.
            # This is not the same as the number of Octs.
            # domain dimensions is the number of root *cells*
            self.domain_dimensions = np.ones(3, dtype="int64") * est
            self.root_grid_mask_offset = f.tell()
            self.root_nocts = self.domain_dimensions.prod() // 8
            self.root_ncells = self.root_nocts * 8
            mylog.debug(
                "Estimating %i cells on a root grid side, %i root octs",
                est,
                self.root_nocts,
            )
            self.root_iOctCh = fpu.read_vector(f, "i", ">")[: self.root_ncells]
            self.root_iOctCh = self.root_iOctCh.reshape(
                self.domain_dimensions, order="F"
            )
            self.root_grid_offset = f.tell()
            self.root_nhvar = fpu.skip(f, endian=">")
            self.root_nvar = fpu.skip(f, endian=">")
            # make sure that the number of root variables is a multiple of
            # rootcells
            assert self.root_nhvar % self.root_ncells == 0
            assert self.root_nvar % self.root_ncells == 0
            self.nhydro_variables = (
                self.root_nhvar + self.root_nvar
            ) / self.root_ncells
            self.iOctFree, self.nOct = fpu.read_vector(f, "i", ">")
            self.child_grid_offset = f.tell()
            # lextra needs to be loaded as a string, but it's actually
            # array values.  So pop it off here, and then re-insert.
            lextra = amr_header_vals.pop("lextra")
            amr_header_vals["lextra"] = np.frombuffer(lextra, ">f4")
            self.parameters.update(amr_header_vals)
            amr_header_vals = None
            # estimate the root level
            float_center, fl, iocts, nocts, root_level = _read_art_level_info(
                f, [0, self.child_grid_offset], 1, coarse_grid=self.domain_dimensions[0]
            )
            del float_center, fl, iocts, nocts
            self.root_level = root_level
            mylog.info("Using root level of %02i", self.root_level)
        # read the particle header
        self.particle_types = []
        self.particle_types_raw = ()
        if not self.skip_particles and self._file_particle_header:
            with open(self._file_particle_header, "rb") as fh:
                particle_header_vals = fpu.read_attrs(fh, particle_header_struct, ">")
                fh.seek(seek_extras)
                n = particle_header_vals["Nspecies"]
                wspecies = np.fromfile(fh, dtype=">f", count=10)
                lspecies = np.fromfile(fh, dtype=">i", count=10)
                # extras needs to be loaded as a string, but it's actually
                # array values.  So pop it off here, and then re-insert.
                extras = particle_header_vals.pop("extras")
                particle_header_vals["extras"] = np.frombuffer(extras, ">f4")
            self.parameters["wspecies"] = wspecies[:n]
            self.parameters["lspecies"] = lspecies[:n]
            for specie in range(n):
                self.particle_types.append("specie%i" % specie)
            self.particle_types_raw = tuple(self.particle_types)
            ls_nonzero = np.diff(lspecies)[: n - 1]
            ls_nonzero = np.append(lspecies[0], ls_nonzero)
            self.star_type = len(ls_nonzero)
            mylog.info("Discovered %i species of particles", len(ls_nonzero))
            info_str = "Particle populations: " + "%9i " * len(ls_nonzero)
            mylog.info(info_str, *ls_nonzero)
            self._particle_type_counts = dict(zip(self.particle_types_raw, ls_nonzero))
            for k, v in particle_header_vals.items():
                if k in self.parameters.keys():
                    if not self.parameters[k] == v:
                        mylog.info(
                            "Inconsistent parameter %s %1.1e  %1.1e",
                            k,
                            v,
                            self.parameters[k],
                        )
                else:
                    self.parameters[k] = v
            self.parameters_particles = particle_header_vals
            self.parameters.update(particle_header_vals)
            self.parameters["ng"] = self.parameters["Ngridc"]
            self.parameters["ncell0"] = self.parameters["ng"] ** 3

        # setup standard simulation params yt expects to see
        self.current_redshift = self.parameters["aexpn"] ** -1.0 - 1.0
        self.omega_lambda = self.parameters["Oml0"]
        self.omega_matter = self.parameters["Om0"]
        self.hubble_constant = self.parameters["hubble"]
        self.min_level = self.parameters["min_level"]
        self.max_level = self.parameters["max_level"]
        if self.limit_level is not None:
            self.max_level = min(self.limit_level, self.parameters["max_level"])
        if self.force_max_level is not None:
            self.max_level = self.force_max_level
        self.hubble_time = 1.0 / (self.hubble_constant * 100 / 3.08568025e19)
        self.current_time = self.quan(b2t(self.parameters["t"]), "Gyr")
        self.gamma = self.parameters["gamma"]
        mylog.info("Max level is %02i", self.max_level)
Example n. 44
0
    def restore_weights(self, weights_file):
        """Load previously trained model weights
        Arguments: 
            weights_file: beginning by project root this is the path 
                                   where is save your weights; example: "weights/weights_01.h5"
        """
        tf.keras.backend.clear_session()  # used to reset layer names
        # load Darknet original weights to TensorFlow model
        range1 = 75 if self.version == "yolov3" else 13
        range2 = [58, 66, 74] if self.version == "yolov3" else [9, 12]

        with open(weights_file, 'rb') as wf:
            major, minor, revision, seen, _ = np.fromfile(wf,
                                                          dtype=np.int32,
                                                          count=5)

            j = 0
            for i in range(range1):
                if i > 0:
                    conv_layer_name = 'conv2d_%d' % i
                else:
                    conv_layer_name = 'conv2d'

                if j > 0:
                    bn_layer_name = 'batch_normalization_%d' % j
                else:
                    bn_layer_name = 'batch_normalization'

                conv_layer = self.model.get_layer(conv_layer_name)
                filters = conv_layer.filters
                k_size = conv_layer.kernel_size[0]
                in_dim = conv_layer.input_shape[-1]

                if i not in range2:
                    # darknet weights: [beta, gamma, mean, variance]
                    bn_weights = np.fromfile(wf,
                                             dtype=np.float32,
                                             count=4 * filters)
                    # tf weights: [gamma, beta, mean, variance]
                    bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
                    bn_layer = self.model.get_layer(bn_layer_name)
                    j += 1
                else:
                    conv_bias = np.fromfile(wf,
                                            dtype=np.float32,
                                            count=filters)

                # darknet shape (out_dim, in_dim, height, width)
                conv_shape = (filters, in_dim, k_size, k_size)
                conv_weights = np.fromfile(wf,
                                           dtype=np.float32,
                                           count=np.prod(conv_shape))
                # tf shape (height, width, in_dim, out_dim)
                conv_weights = conv_weights.reshape(conv_shape).transpose(
                    [2, 3, 1, 0])

                if i not in range2:
                    conv_layer.set_weights([conv_weights])
                    bn_layer.set_weights(bn_weights)
                else:
                    conv_layer.set_weights([conv_weights, conv_bias])

            assert len(wf.read()) == 0, 'failed to read all data'
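
The header handling above follows the Darknet .weights convention: five int32-sized words (version fields plus the images-seen counter), followed by raw float32 parameters. A self-contained round trip of just that framing, with synthetic numbers:

import tempfile

import numpy as np

# Write a fake .weights file: 5 int32 header words, then float32 data.
with tempfile.NamedTemporaryFile(suffix=".weights", delete=False) as wf:
    np.array([0, 2, 0, 32013312, 0], dtype=np.int32).tofile(wf)
    np.random.rand(10).astype(np.float32).tofile(wf)

with open(wf.name, "rb") as fp:
    major, minor, revision, *seen = np.fromfile(fp, dtype=np.int32, count=5)
    weights = np.fromfile(fp, dtype=np.float32)
print(int(major), int(minor), int(revision), weights.shape)  # 0 2 0 (10,)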
Example n. 45
0
def readTrc( fName ):
    """
        Reads .trc binary files from LeCroy Oscilloscopes.
        Decoding is based on LECROY_2_3 template.
        [More info](http://forums.ni.com/attachments/ni/60/4652/2/LeCroyWaveformTemplate_2_3.pdf)

        Parameters
        -----------
        fName = filename of the .trc file

        Returns
        -----------
        x: array with sample times [s],

        y: array with sample values [V],

        d: dictionary with metadata


        M. Betz 09/2015
    """
    with open(fName, "rb") as fid:
        data = fid.read(50).decode()
        wdOffset = data.find('WAVEDESC')

        #------------------------
        # Get binary format / endianess
        #------------------------
        if readX( fid, '?', wdOffset + 32 ):  #16 or 8 bit sample format?
            smplFmt = "int16"
        else:
            smplFmt = "int8"
        if readX( fid, '?', wdOffset + 34 ):  #Big or little endian?
            endi = "<"
        else:
            endi = ">"

        #------------------------
        # Get length of blocks and arrays:
        #------------------------
        lWAVE_DESCRIPTOR = readX( fid, endi+"l", wdOffset + 36 )
        lUSER_TEXT       = readX( fid, endi+"l", wdOffset + 40 )
        lTRIGTIME_ARRAY  = readX( fid, endi+"l", wdOffset + 48 )
        lRIS_TIME_ARRAY  = readX( fid, endi+"l", wdOffset + 52 )
        lWAVE_ARRAY_1    = readX( fid, endi+"l", wdOffset + 60 )
        lWAVE_ARRAY_2    = readX( fid, endi+"l", wdOffset + 64 )

        d = dict()  #Will store all the extracted Metadata

        #------------------------
        # Get Instrument info
        #------------------------
        d["INSTRUMENT_NAME"]  = readX( fid, "16s",    wdOffset + 76 ).decode().split('\x00')[0]
        d["INSTRUMENT_NUMBER"]= readX( fid, endi+"l", wdOffset + 92 )
        d["TRACE_LABEL"]      = readX( fid, "16s",    wdOffset + 96 ).decode().split('\x00')[0]

        #------------------------
        # Get Waveform info
        #------------------------
        d["WAVE_ARRAY_COUNT"] = readX( fid, endi+"l", wdOffset +116 )
        d["PNTS_PER_SCREEN"]  = readX( fid, endi+"l", wdOffset +120 )
        d["FIRST_VALID_PNT"]  = readX( fid, endi+"l", wdOffset +124 )
        d["LAST_VALID_PNT"]   = readX( fid, endi+"l", wdOffset +128 )
        d["FIRST_POINT"]      = readX( fid, endi+"l", wdOffset +132 )
        d["SPARSING_FACTOR"]  = readX( fid, endi+"l", wdOffset +136 )
        d["SEGMENT_INDEX"]    = readX( fid, endi+"l", wdOffset +140 )
        d["SUBARRAY_COUNT"]   = readX( fid, endi+"l", wdOffset +144 )
        d["SWEEPS_PER_ACQ"]   = readX( fid, endi+"l", wdOffset +148 )
        d["POINTS_PER_PAIR"]  = readX( fid, endi+"h", wdOffset +152 )
        d["PAIR_OFFSET"]      = readX( fid, endi+"h", wdOffset +154 )
        d["VERTICAL_GAIN"]    = readX( fid, endi+"f", wdOffset +156 ) #to get floating values from raw data :
        d["VERTICAL_OFFSET"]  = readX( fid, endi+"f", wdOffset +160 ) #VERTICAL_GAIN * data - VERTICAL_OFFSET
        d["MAX_VALUE"]        = readX( fid, endi+"f", wdOffset +164 )
        d["MIN_VALUE"]        = readX( fid, endi+"f", wdOffset +168 )
        d["NOMINAL_BITS"]     = readX( fid, endi+"h", wdOffset +172 )
        d["NOM_SUBARRAY_COUNT"]= readX( fid, endi+"h",wdOffset +174 )
        d["HORIZ_INTERVAL"]   = readX( fid, endi+"f", wdOffset +176 ) #sampling interval for time domain waveforms
        d["HORIZ_OFFSET"]     = readX( fid, endi+"d", wdOffset +180 ) #trigger offset for the first sweep of the trigger, seconds between the trigger and the first data point
        d["PIXEL_OFFSET"]     = readX( fid, endi+"d", wdOffset +188 )
        d["VERTUNIT"]         = readX( fid, "48s", wdOffset +196 ).decode().split('\x00')[0]
        d["HORUNIT"]          = readX( fid, "48s", wdOffset +244 ).decode().split('\x00')[0]
        d["HORIZ_UNCERTAINTY"]= readX( fid, endi+"f", wdOffset +292 )
        d["TRIGGER_TIME"]     = getTimeStamp( fid, endi, wdOffset +296 )
        d["ACQ_DURATION"]     = readX( fid, endi+"f", wdOffset +312 )
        d["RECORD_TYPE"]      = ["single_sweep","interleaved","histogram","graph","filter_coefficient","complex","extrema","sequence_obsolete","centered_RIS","peak_detect"][ readX( fid, endi+"H", wdOffset +316 ) ]
        d["PROCESSING_DONE"]  = ["no_processing","fir_filter","interpolated","sparsed","autoscaled","no_result","rolling","cumulative"][ readX( fid, endi+"H", wdOffset +318 ) ]
        d["RIS_SWEEPS"]       = readX( fid, endi+"h", wdOffset +322 )
        d["TIMEBASE"]         = ['1_ps/div', '2_ps/div', '5_ps/div', '10_ps/div', '20_ps/div', '50_ps/div', '100_ps/div', '200_ps/div', '500_ps/div', '1_ns/div', '2_ns/div', '5_ns/div', '10_ns/div', '20_ns/div', '50_ns/div', '100_ns/div', '200_ns/div', '500_ns/div', '1_us/div', '2_us/div', '5_us/div', '10_us/div', '20_us/div', '50_us/div', '100_us/div', '200_us/div', '500_us/div', '1_ms/div', '2_ms/div', '5_ms/div', '10_ms/div', '20_ms/div', '50_ms/div', '100_ms/div', '200_ms/div', '500_ms/div', '1_s/div', '2_s/div', '5_s/div', '10_s/div', '20_s/div', '50_s/div', '100_s/div', '200_s/div', '500_s/div', '1_ks/div', '2_ks/div', '5_ks/div', 'EXTERNAL'][ readX( fid, endi+"H", wdOffset +324 ) ]
        d["VERT_COUPLING"]    = ['DC_50_Ohms', 'ground', 'DC_1MOhm', 'ground', 'AC,_1MOhm'][ readX( fid, endi+"H", wdOffset +326 ) ]
        d["PROBE_ATT"]        = readX( fid, endi+"f", wdOffset +328 )
        d["FIXED_VERT_GAIN"]  = ['1_uV/div','2_uV/div','5_uV/div','10_uV/div','20_uV/div','50_uV/div','100_uV/div','200_uV/div','500_uV/div','1_mV/div','2_mV/div','5_mV/div','10_mV/div','20_mV/div','50_mV/div','100_mV/div','200_mV/div','500_mV/div','1_V/div','2_V/div','5_V/div','10_V/div','20_V/div','50_V/div','100_V/div','200_V/div','500_V/div','1_kV/div'][ readX( fid, endi+"H", wdOffset +332 ) ]
        d["BANDWIDTH_LIMIT"]  = ['off', 'on'][ readX( fid, endi+"H", wdOffset +334 ) ]
        d["VERTICAL_VERNIER"] = readX( fid, endi+"f", wdOffset +336 )
        d["ACQ_VERT_OFFSET"]  = readX( fid, endi+"f", wdOffset +340 )
        d["WAVE_SOURCE"]      = readX( fid, endi+"H", wdOffset +344 )
        d["USER_TEXT"]        = readX( fid, "{0}s".format(lUSER_TEXT), wdOffset + lWAVE_DESCRIPTOR ).decode().split('\x00')[0]

        #------------------------
        # Get main sample data with the help of numpy's fromfile()
        #------------------------
        fid.seek( wdOffset + lWAVE_DESCRIPTOR + lUSER_TEXT + lTRIGTIME_ARRAY + lRIS_TIME_ARRAY ) #Seek to WAVE_ARRAY_1
        y = np.fromfile( fid, smplFmt, lWAVE_ARRAY_1 )
        if endi == ">":
            y.byteswap( True )
        y = d["VERTICAL_GAIN"] * y - d["VERTICAL_OFFSET"]
        x = np.arange(1,len(y)+1)*d["HORIZ_INTERVAL"] + d["HORIZ_OFFSET"]
    return x, y, d
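
The scaling at the end of readTrc is worth seeing in isolation: raw ADC codes become volts via VERTICAL_GAIN * raw - VERTICAL_OFFSET, and the time axis is built from HORIZ_INTERVAL and HORIZ_OFFSET. All values below are made up:

import numpy as np

raw = np.array([-2, 0, 3], dtype=np.int16)  # raw ADC codes
gain, offset = 1.25e-4, 0.01                # made-up VERTICAL_GAIN / VERTICAL_OFFSET
dt, t0 = 1e-9, -5e-9                        # made-up HORIZ_INTERVAL / HORIZ_OFFSET

y = gain * raw - offset
x = np.arange(1, len(y) + 1) * dt + t0
print(x)
print(y)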
Example n. 46
0
    def _parse_parameter_file(self):
        """
        Get the various simulation parameters & constants.
        """
        self.domain_left_edge = np.zeros(3, dtype="float")
        self.domain_right_edge = np.zeros(3, dtype="float") + 1.0
        self.dimensionality = 3
        self.refine_by = 2
        self._periodicity = (True, True, True)
        self.cosmological_simulation = True
        self.parameters = {}
        self.parameters.update(constants)
        self.parameters["Time"] = 1.0
        self.file_count = 1
        self.filename_template = self.parameter_filename

        # read the particle header
        self.particle_types = []
        self.particle_types_raw = ()
        assert self._file_particle_header
        with open(self._file_particle_header, "rb") as fh:
            seek = 4
            fh.seek(seek)
            headerstr = fh.read(45).decode("ascii")
            aexpn = np.fromfile(fh, count=1, dtype=">f4")
            aexp0 = np.fromfile(fh, count=1, dtype=">f4")
            amplt = np.fromfile(fh, count=1, dtype=">f4")
            astep = np.fromfile(fh, count=1, dtype=">f4")
            istep = np.fromfile(fh, count=1, dtype=">i4")
            partw = np.fromfile(fh, count=1, dtype=">f4")
            tintg = np.fromfile(fh, count=1, dtype=">f4")
            ekin = np.fromfile(fh, count=1, dtype=">f4")
            ekin1 = np.fromfile(fh, count=1, dtype=">f4")
            ekin2 = np.fromfile(fh, count=1, dtype=">f4")
            au0 = np.fromfile(fh, count=1, dtype=">f4")
            aeu0 = np.fromfile(fh, count=1, dtype=">f4")
            nrowc = np.fromfile(fh, count=1, dtype=">i4")
            ngridc = np.fromfile(fh, count=1, dtype=">i4")
            nspecs = np.fromfile(fh, count=1, dtype=">i4")
            nseed = np.fromfile(fh, count=1, dtype=">i4")
            Om0 = np.fromfile(fh, count=1, dtype=">f4")
            Oml0 = np.fromfile(fh, count=1, dtype=">f4")
            hubble = np.fromfile(fh, count=1, dtype=">f4")
            Wp5 = np.fromfile(fh, count=1, dtype=">f4")
            Ocurv = np.fromfile(fh, count=1, dtype=">f4")
            wspecies = np.fromfile(fh, count=10, dtype=">f4")
            lspecies = np.fromfile(fh, count=10, dtype=">i4")
            extras = np.fromfile(fh, count=79, dtype=">f4")
            boxsize = np.fromfile(fh, count=1, dtype=">f4")
        n = nspecs[0]
        particle_header_vals = {}
        tmp = [
            headerstr,
            aexpn,
            aexp0,
            amplt,
            astep,
            istep,
            partw,
            tintg,
            ekin,
            ekin1,
            ekin2,
            au0,
            aeu0,
            nrowc,
            ngridc,
            nspecs,
            nseed,
            Om0,
            Oml0,
            hubble,
            Wp5,
            Ocurv,
            wspecies,
            lspecies,
            extras,
            boxsize,
        ]
        for i, arr in enumerate(tmp):
            a1 = dmparticle_header_struct[0][i]
            a2 = dmparticle_header_struct[1][i]
            if a2 == 1:
                particle_header_vals[a1] = arr[0]
            else:
                particle_header_vals[a1] = arr[:a2]
        for specie in range(n):
            self.particle_types.append("specie%i" % specie)
        self.particle_types_raw = tuple(self.particle_types)
        ls_nonzero = np.diff(lspecies)[: n - 1]
        ls_nonzero = np.append(lspecies[0], ls_nonzero)
        self.star_type = len(ls_nonzero)
        mylog.info("Discovered %i species of particles", len(ls_nonzero))
        info_str = "Particle populations: " + "%9i " * len(ls_nonzero)
        mylog.info(info_str, *ls_nonzero)
        for k, v in particle_header_vals.items():
            if k in self.parameters.keys():
                if not self.parameters[k] == v:
                    mylog.info(
                        "Inconsistent parameter %s %1.1e  %1.1e",
                        k,
                        v,
                        self.parameters[k],
                    )
            else:
                self.parameters[k] = v
        self.parameters_particles = particle_header_vals
        self.parameters.update(particle_header_vals)
        self.parameters["wspecies"] = wspecies[:n]
        self.parameters["lspecies"] = lspecies[:n]
        self.parameters["ng"] = self.parameters["Ngridc"]
        self.parameters["ncell0"] = self.parameters["ng"] ** 3
        self.parameters["boxh"] = self.parameters["boxsize"]
        self.parameters["total_particles"] = ls_nonzero
        self.domain_dimensions = np.ones(3, dtype="int64") * 2  # NOT ng

        # setup standard simulation params yt expects to see
        # Convert to float to please unyt
        self.current_redshift = float(self.parameters["aexpn"] ** -1.0 - 1.0)
        self.omega_lambda = float(particle_header_vals["Oml0"])
        self.omega_matter = float(particle_header_vals["Om0"])
        self.hubble_constant = float(particle_header_vals["hubble"])
        self.min_level = 0
        self.max_level = 0
        #        self.min_level = particle_header_vals['min_level']
        #        self.max_level = particle_header_vals['max_level']
        #        if self.limit_level is not None:
        #            self.max_level = min(
        #                self.limit_level, particle_header_vals['max_level'])
        #        if self.force_max_level is not None:
        #            self.max_level = self.force_max_level
        self.hubble_time = 1.0 / (self.hubble_constant * 100 / 3.08568025e19)
        self.parameters["t"] = a2b(self.parameters["aexpn"])
        self.current_time = self.quan(b2t(self.parameters["t"]), "Gyr")
        self.gamma = self.parameters["gamma"]
        mylog.info("Max level is %02i", self.max_level)
Example n. 47
0
def gen_data_dx(fmap_shape,
                filter_shape,
                pad_,
                stride_,
                dilation_,
                expect_file,
                attrs=None):
    block_size = 16
    in_n, in_c, in_h, in_w = fmap_shape
    cout, cin, w_h, w_w = filter_shape
    assert in_c == cin

    in_c = (in_c + block_size - 1) // block_size * block_size
    cout = (cout + block_size - 1) // block_size * block_size

    pad_top, pad_bottom, pad_left, pad_right = pad_
    stride_h, stride_w = stride_

    dilation_h, dilation_w = dilation_
    assert dilation_h == 1
    assert dilation_w == 1
    x_shape = (in_n, in_c, in_h, in_w)
    w_shape = (cout, in_c, w_h, w_w)
    b_shape = (w_shape[0], )

    p_top = w_h - pad_top - 1
    p_left = w_w - pad_left - 1
    p_bottom = in_h + pad_top - stride_h * (
        (in_h + pad_top + pad_bottom - w_h) // stride_h + 1)
    p_right = in_w + pad_left - stride_w * (
        (in_w + pad_left + pad_right - w_w) // stride_w + 1)

    print("Data gen ...")
    x = random_gaussian(x_shape, miu=1, sigma=0.1).astype(np.float16)
    w = random_gaussian(w_shape, miu=1, sigma=0.1).astype(np.float16)

    Ho = (x_shape[2] + pad_top + pad_bottom - w_shape[2]) // stride_h + 1
    Wo = (x_shape[3] + pad_left + pad_right - w_shape[3]) // stride_w + 1

    out_shape = (x_shape[0], w_shape[0], Ho, Wo)
    dout = random_gaussian(out_shape, miu=1, sigma=0.1).astype(np.float16)

    dx_shape = (in_n, in_c // block_size, in_h, in_w, block_size)
    flag_w = os.environ.get("WRITE_TO_DISK", "No")
    if (flag_w == "No") and (os.path.exists(expect_file) == True):
        # read expect from file
        dx = np.fromfile(expect_file, np.float16).reshape(dx_shape)
    else:
        # compute expect data:
        dx = calculate_conv_backprop_input(x, w, dout,
                                           [p_top, p_bottom, p_left, p_right],
                                           [stride_h, stride_w])

    if flag_w == "Yes":
        # write expect to file
        with open(expect_file, "w+") as file:
            dx.tofile(file)
            file.close()

    # reshape
    C0 = block_size
    ON, OC, OH, OW = out_shape
    WN, WC, WH, WW = w_shape
    dout = dout.reshape(ON, OC // C0, C0, OH, OW).transpose(0, 1, 3, 4,
                                                            2).copy()
    w = w.reshape(WN, WC // C0, C0, WH, WW).transpose(1, 3, 4, 0, 2).copy()

    return dout, w, dx
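
The padding arithmetic above (p_top = w_h - pad_top - 1 and friends) maps the forward convolution's padding onto the padding applied to the gradient for the transposed pass; a quick numeric check with made-up sizes:

# Made-up case: 8x8 input, 3x3 kernel, stride 2, padding 1 on every side.
in_h, w_h, pad_top, pad_bottom, stride_h = 8, 3, 1, 1, 2

p_top = w_h - pad_top - 1
p_bottom = in_h + pad_top - stride_h * (
    (in_h + pad_top + pad_bottom - w_h) // stride_h + 1)
print(p_top, p_bottom)  # 1 1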
Example n. 48
0
import numpy as np
import matplotlib.pyplot as plt
import KleinFunction as Klein

image_size = 28
no_of_different_labels = 10
image_pixels = image_size*image_size
usebinQ = True
if not usebinQ:
    #train_data = np.loadtxt("../../mnist_train.csv",delimiter=",")
    train_data = np.loadtxt("../../mnist_train_100.csv",delimiter=",")
    fac = 255 # normalising data values to [0., 1.]
    train_imgs = np.asfarray(train_data[:, 1:]) / fac
    train_labels = np.asfarray(train_data[:, :1])
else:
    train_imgs = np.fromfile("../../mnist_train_imgs_binary.dat").reshape((60000,image_pixels))
    train_labels = np.fromfile("../../mnist_train_labels_binary.dat").reshape((60000,1))
test_imgs = np.fromfile("../../mnist_test_imgs_binary.dat").reshape((10000,image_pixels))
test_labels = np.fromfile("../../mnist_test_labels_binary.dat").reshape((10000,1))


no_of_hidden_nodes = 100
accuracy = 0.96
epochs = 100  # 30
bias = None
nAlgorithm = 1 # excluding minibatch
run_minibatch = False
arr_learning_rate = [0.001,0.002,0.005,0.01,0.02,0.05,0.1,0.2,0.5,1.0]
save_frequency = 6000
threshold = 0.0005
arr_momentum_para = [0.,0.9,0.] # 0.9 is for the algorithm with momentum
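
Note that np.fromfile defaults to float64 when no dtype is given, so the binary .dat files above must have been written as float64 arrays. A self-contained round trip of that convention:

import tempfile

import numpy as np

imgs = np.random.rand(3, 784)       # three fake flattened 28x28 images
with tempfile.NamedTemporaryFile(delete=False) as f:
    imgs.tofile(f)                  # tofile keeps the float64 dtype on disk

back = np.fromfile(f.name).reshape((3, 784))  # float64 assumed by default
print(np.allclose(imgs, back))      # True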
Example n. 49
0
        ebs = [i * 1e-3 for i in range(1, 10, 2)]
    else:
        ebs = [i * 1e-2 for i in range(1, 8, 2)] + [0.1]

    idxrange = [0]

    pid = str(os.getpid()).strip()
    data = np.zeros((len(ebs) + 1, len(idxrange) + 1, 9), dtype=np.float32)
    for i in range(9):
        data[1:, 0, i] = ebs
        data[0, 1:, i] = idxrange

    for i, idx in enumerate(idxrange):
        filename = "%s.dat.log10" % field
        filepath = os.path.join(datafolder, filename)
        a = np.fromfile(filepath, dtype=np.float32)
        a = a - np.min(a)
        filepath = filepath + ".positive"
        a.tofile(filepath)
        for j, eb in enumerate(ebs):
            os.system("python3 Autoencoder_Prototype.py -c %s -e %f -n %s" %
                      (filepath, eb, field))
            zpath = filepath + ".z"
            dpath = zpath + ".d"
            dvname = filepath.split("/")[-1] + ".dvalue"
            dvpath = filepath + ".dvalue"
            os.system("du -s %s*&>%s.txt" % (filepath, pid))
            origsize = 0
            compressedsize = 0
            with open("%s.txt" % pid, "r") as f:
                lines = f.read().splitlines()
Example n. 50
0
    def load_weights(self, weightfile):
        #Open the weights file
        fp = open(weightfile, "rb")

        #The first 5 values are header information
        # 1. Major version number
        # 2. Minor Version Number
        # 3. Subversion number
        # 4,5. Images seen by the network (during training)
        header = np.fromfile(fp, dtype=np.int32, count=5)
        self.header = torch.from_numpy(header)
        self.seen = self.header[3]

        weights = np.fromfile(fp, dtype=np.float32)
        fp.close()

        ptr = 0
        for i in range(len(self.module_list)):
            module_type = self.blocks[i + 1]["type"]

            #If module_type is convolutional load weights
            #Otherwise ignore.

            if module_type == "convolutional":
                model = self.module_list[i]
                try:
                    batch_normalize = int(self.blocks[i +
                                                      1]["batch_normalize"])
                except KeyError:
                    batch_normalize = 0

                conv = model[0]
                if (batch_normalize):
                    bn = model[1]

                    #Get the number of weights of Batch Norm Layer
                    num_bn_biases = bn.bias.numel()

                    #Load the weights
                    bn_biases = torch.from_numpy(weights[ptr:ptr +
                                                         num_bn_biases])
                    ptr += num_bn_biases

                    bn_weights = torch.from_numpy(weights[ptr:ptr +
                                                          num_bn_biases])
                    ptr += num_bn_biases

                    bn_running_mean = torch.from_numpy(weights[ptr:ptr +
                                                               num_bn_biases])
                    ptr += num_bn_biases

                    bn_running_var = torch.from_numpy(weights[ptr:ptr +
                                                              num_bn_biases])
                    ptr += num_bn_biases

                    #Cast the loaded weights into dims of model weights.
                    bn_biases = bn_biases.view_as(bn.bias.data)
                    bn_weights = bn_weights.view_as(bn.weight.data)
                    bn_running_mean = bn_running_mean.view_as(bn.running_mean)
                    bn_running_var = bn_running_var.view_as(bn.running_var)

                    #Copy the data to model
                    bn.bias.data.copy_(bn_biases)
                    bn.weight.data.copy_(bn_weights)
                    bn.running_mean.copy_(bn_running_mean)
                    bn.running_var.copy_(bn_running_var)
                else:
                    #Number of biases
                    num_biases = conv.bias.numel()

                    #Load the weights
                    conv_biases = torch.from_numpy(weights[ptr:ptr +
                                                           num_biases])
                    ptr = ptr + num_biases

                    #reshape the loaded weights according to the dims of the model weights
                    conv_biases = conv_biases.view_as(conv.bias.data)

                    #Finally copy the data
                    conv.bias.data.copy_(conv_biases)

                #Let us load the weights for the Convolutional layers
                num_weights = conv.weight.numel()

                #Do the same as above for weights
                conv_weights = torch.from_numpy(weights[ptr:ptr + num_weights])
                ptr = ptr + num_weights

                conv_weights = conv_weights.view_as(conv.weight.data)
                conv.weight.data.copy_(conv_weights)
Example n. 51
0
def read_all_images(path_to_data):
    with open(path_to_data, 'rb') as f:
        everything = np.fromfile(f, dtype=np.uint8)
        images = np.reshape(everything, (-1, 3, 96, 96))
        images = np.transpose(images, (0, 3, 2, 1))
        return images
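
A round trip of read_all_images with synthetic data, relying on the STL-10 convention that the reshape and transpose encode (column-major 3x96x96 pixel blocks):

import tempfile

import numpy as np

fake = np.random.randint(0, 256, size=(5, 3, 96, 96), dtype=np.uint8)
with tempfile.NamedTemporaryFile(delete=False) as f:
    fake.tofile(f)

imgs = read_all_images(f.name)  # function defined above
print(imgs.shape)               # (5, 96, 96, 3)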
Example n. 52
0
def load_weights(var_list, weights_file):
    """
    Loads and converts pre-trained weights.
    param:
        var_list: list of network variables.
        weights_file: name of the binary file.
    """
    with open(weights_file, "rb") as fp:
        np.fromfile(fp, dtype=np.int32, count=5)
        weights = np.fromfile(fp, dtype=np.float32)

    ptr = 0
    i = 0
    assign_ops = []
    while i < len(var_list) - 1:
        try:
            var1 = var_list[i]
            var2 = var_list[i + 1]
            # do something only if we process conv layer
            if 'Conv' in var1.name.split('/')[-2]:
                # check type of next layer
                if 'BatchNorm' in var2.name.split('/')[-2]:
                    # load batch norm params
                    gamma, beta, mean, var = var_list[i + 1:i + 5]
                    batch_norm_vars = [beta, gamma, mean, var]
                    for var in batch_norm_vars:
                        shape = var.shape.as_list()
                        num_params = np.prod(shape)
                        var_weights = weights[ptr:ptr + num_params].reshape(shape)
                        ptr += num_params
                        assign_ops.append(tf.assign(var, var_weights, validate_shape=True))
                    # we move the pointer by 4, because we loaded 4 variables
                    i += 4
                elif 'Conv' in var2.name.split('/')[-2]:
                    # load biases
                    bias = var2
                    bias_shape = bias.shape.as_list()
                    bias_params = np.prod(bias_shape)
                    bias_weights = weights[ptr:ptr +
                                           bias_params].reshape(bias_shape)
                    ptr += bias_params
                    assign_ops.append(tf.assign(bias, bias_weights, validate_shape=True))
                    # we loaded 1 variable
                    i += 1
                # we can load weights of conv layer
                shape = var1.shape.as_list()
                num_params = np.prod(shape)

                var_weights = weights[ptr:ptr + num_params].reshape(
                    (shape[3], shape[2], shape[0], shape[1]))
                # remember to transpose to column-major
                var_weights = np.transpose(var_weights, (2, 3, 1, 0))
                ptr += num_params
                assign_ops.append(
                    tf.assign(var1, var_weights, validate_shape=True))
                i += 1
        except ValueError:
            print("tensor", i)
            break  # avoid looping forever on a malformed weights file

    return assign_ops
Example n. 53
0
def read_mrc(path, read_data=True, show_progress=False):
    path = os.path.realpath(path)
    with open(path, 'rb') as f:
        mrc = {}
        mrc['nx'] = int(struct.unpack('i', f.read(4))[0])
        mrc['ny'] = int(struct.unpack('i', f.read(4))[0])
        mrc['nz'] = int(struct.unpack('i', f.read(4))[0])
        mrc['mode'] = struct.unpack('i', f.read(4))[0]
        mrc['nxstart'] = struct.unpack('i', f.read(4))[0]
        mrc['nystart'] = struct.unpack('i', f.read(4))[0]
        mrc['nzstart'] = struct.unpack('i', f.read(4))[0]
        mrc['mx'] = struct.unpack('i', f.read(4))[0]
        mrc['my'] = struct.unpack('i', f.read(4))[0]
        mrc['mz'] = struct.unpack('i', f.read(4))[0]
        mrc['xlen'] = struct.unpack('f', f.read(4))[0]
        mrc['ylen'] = struct.unpack('f', f.read(4))[0]
        mrc['zlen'] = struct.unpack('f', f.read(4))[0]
        mrc['alpha'] = struct.unpack('f', f.read(4))[0]
        mrc['beta'] = struct.unpack('f', f.read(4))[0]
        mrc['gamma'] = struct.unpack('f', f.read(4))[0]
        mrc['mapc'] = struct.unpack('i', f.read(4))[0]
        mrc['mapr'] = struct.unpack('i', f.read(4))[0]
        mrc['maps'] = struct.unpack('i', f.read(4))[0]
        mrc['amin'] = struct.unpack('f', f.read(4))[0]
        mrc['amax'] = struct.unpack('f', f.read(4))[0]
        mrc['amean'] = struct.unpack('f', f.read(4))[0]
        mrc['ispg'] = struct.unpack('h', f.read(2))[0]
        mrc['nsymbt'] = struct.unpack('h', f.read(2))[0]
        mrc['next'] = struct.unpack('i', f.read(4))[0]
        mrc['creatid'] = struct.unpack('h', f.read(2))[0]
        mrc['unused1'] = struct.unpack(('c' * 30), f.read(30))[0]
        mrc['nint'] = struct.unpack('h', f.read(2))[0]
        mrc['nreal'] = struct.unpack('h', f.read(2))[0]
        mrc['unused2'] = struct.unpack(('c' * 28), f.read(28))[0]
        mrc['idtype'] = struct.unpack('h', f.read(2))[0]
        mrc['lens'] = struct.unpack('h', f.read(2))[0]
        mrc['nd1'] = struct.unpack('h', f.read(2))[0]
        mrc['nd2'] = struct.unpack('h', f.read(2))[0]
        mrc['vd1'] = struct.unpack('h', f.read(2))[0]
        mrc['vd2'] = struct.unpack('h', f.read(2))[0]
        mrc['tiltangles'] = struct.unpack(('f' * 6), f.read((4 * 6)))
        mrc['xorg'] = struct.unpack('f', f.read(4))[0]
        mrc['yorg'] = struct.unpack('f', f.read(4))[0]
        mrc['zorg'] = struct.unpack('f', f.read(4))[0]
        mrc['cmap'] = struct.unpack(('c' * 4), f.read(4))
        mrc['stamp'] = struct.unpack(('c' * 4), f.read(4))
        mrc['rms'] = struct.unpack('f', f.read(4))[0]
        mrc['nlabl'] = struct.unpack('i', f.read(4))[0]
        mrc['labl'] = struct.unpack(('c' * 800), f.read(800))
        size = [mrc['nx'], mrc['ny'], mrc['nz']]
        n_voxel = N.prod(size)
        extended = {}
        extended['magnification'] = [0]
        extended['exp_time'] = [0]
        extended['pixelsize'] = [0]
        extended['defocus'] = [0]
        extended['a_tilt'] = ([0] * mrc['nz'])
        extended['tiltaxis'] = [0]
        if (mrc['next'] != 0):
            # number of 128-byte blocks in the extended header
            nbh = int(mrc['next'] // 128)
            if (nbh == 1024):
                # grow the per-slice lists so the assignments below cannot
                # raise KeyError or IndexError
                for key in ('a_tilt', 'b_tilt', 'x_stage', 'y_stage',
                            'z_stage', 'x_shift', 'y_shift', 'defocus',
                            'exp_time', 'mean_int', 'tiltaxis', 'pixelsize',
                            'magnification'):
                    extended[key] = ([0] * nbh)
                for lauf in range(nbh):
                    extended['a_tilt'][lauf] = struct.unpack('f', f.read(4))[0]
                    extended['b_tilt'][lauf] = struct.unpack('f', f.read(4))[0]
                    extended['x_stage'][lauf] = struct.unpack('f', f.read(4))[0]
                    extended['y_stage'][lauf] = struct.unpack('f', f.read(4))[0]
                    extended['z_stage'][lauf] = struct.unpack('f', f.read(4))[0]
                    extended['x_shift'][lauf] = struct.unpack('f', f.read(4))[0]
                    extended['y_shift'][lauf] = struct.unpack('f', f.read(4))[0]
                    extended['defocus'][lauf] = struct.unpack('f', f.read(4))[0]
                    extended['exp_time'][lauf] = struct.unpack('f', f.read(4))[0]
                    extended['mean_int'][lauf] = struct.unpack('f', f.read(4))[0]
                    extended['tiltaxis'][lauf] = struct.unpack('f', f.read(4))[0]
                    extended['pixelsize'][lauf] = struct.unpack('f', f.read(4))[0]
                    extended['magnification'][lauf] = struct.unpack('f', f.read(4))[0]
                    # 13 floats (52 bytes) read; skip the rest of the block
                    f.seek(128 - 52, 1)
            else:
                f.seek(mrc['next'], 1)
        if read_data:
            slice_voxel_num = (mrc['nx'] * mrc['ny'])
            v = None
            for i in range(mrc['nz']):
                if show_progress:
                    print('\r', i, '   ', end=' ')
                    sys.stdout.flush()
                if (mrc['mode'] == 0):
                    if (v is None):
                        v = N.zeros(size, dtype=N.int8)
                    data_read = N.fromfile(f,
                                           dtype=N.int8,
                                           count=slice_voxel_num)
                elif (mrc['mode'] == 1):
                    if (v is None):
                        v = N.zeros(size, dtype=N.int16)
                    data_read = N.fromfile(f,
                                           dtype=N.int16,
                                           count=slice_voxel_num)
                elif (mrc['mode'] == 2):
                    if (v is None):
                        v = N.zeros(size, dtype=N.float32)
                    data_read = N.fromfile(f,
                                           dtype=N.float32,
                                           count=slice_voxel_num)
                else:
                    raise Exception(
                        'Sorry, I cannot read this as an MRC-File !!!')
                if (data_read.size != slice_voxel_num):
                    import pdb
                    pdb.set_trace()
                v[:, :, i] = N.reshape(data_read, (mrc['nx'], mrc['ny']),
                                       order='F')
        else:
            v = None
        h = {}
        h['Voltage'] = None
        h['Cs'] = None
        h['Aperture'] = None
        h['Magnification'] = extended['magnification'][0]
        h['Postmagnification'] = None
        h['Exposuretime'] = extended['exp_time'][0]
        h['Objectpixelsize'] = (extended['pixelsize'][0] * 1000000000.0)
        h['Microscope'] = None
        h['Pixelsize'] = None
        h['CCDArea'] = None
        h['Defocus'] = extended['defocus'][0]
        h['Astigmatism'] = None
        h['AstigmatismAngle'] = None
        h['FocusIncrement'] = None
        h['CountsPerElectron'] = None
        h['Intensity'] = None
        h['EnergySlitwidth'] = None
        h['EnergyOffset'] = None
        h['Tiltangle'] = extended['a_tilt'][:mrc['nz']]
        h['Tiltaxis'] = extended['tiltaxis'][0]
        h['Username'] = None
        h['Date'] = None
        h['Size'] = [mrc['nx'], mrc['ny'], mrc['nz']]
        h['Comment'] = None
        h['Parameter'] = None
        h['Fillup'] = None
        h['Filename'] = path
        h['Marker_X'] = None
        h['Marker_Y'] = None
        h['MRC'] = mrc
    return {
        'header': h,
        'value': v,
    }
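
The header parsing above is sequential struct.unpack calls across the fixed 1024-byte MRC header; the same pattern in miniature, over a synthetic buffer with made-up field values:

import io
import struct

buf = io.BytesIO(struct.pack('iii f', 4, 4, 4, 1.0))  # made-up nx, ny, nz, xlen
hdr = {}
hdr['nx'] = struct.unpack('i', buf.read(4))[0]
hdr['ny'] = struct.unpack('i', buf.read(4))[0]
hdr['nz'] = struct.unpack('i', buf.read(4))[0]
hdr['xlen'] = struct.unpack('f', buf.read(4))[0]
print(hdr)  # {'nx': 4, 'ny': 4, 'nz': 4, 'xlen': 1.0}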
Example n. 54
0
    def __init__(self, fileName):
        with open(fileName, 'rb') as f:
            a = np.fromfile(f, dtype=np.uint64)
        self.reqTimes = a.reshape((a.shape[0], 1))
Example n. 55
0
# set up model
model = Sequential()
model.add(Conv1D(64, 5, input_shape=(128, 44), activation='relu'))
model.add(Dropout(0.2))
model.add(Conv1D(32, 5, activation='relu'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(1))
model.compile(optimizer='rmsprop', loss='mse')

print('done setting up model!')

# get training and validation data
X = np.load('data/train_frames.npy')
y = np.fromfile('data/train.txt', sep=' ')[:-1]
idx = np.random.permutation(len(X))
X, y = X[idx], y[idx]
X_validate = X[:len(X) // 5]
X_train = X[len(X) // 5:]
y_validate = y[:len(X) // 5]
y_train = y[len(X) // 5:]

print('done loading data!')

model.fit(X_train, y_train, epochs=40, batch_size=32)

print('done fitting model!')
print(model.summary())

y_validate_pred = model.predict(X_validate)
Example n. 56
0
def read_labels(path_to_labels):
    with open(path_to_labels, 'rb') as f:
        labels = np.fromfile(f, dtype=np.uint8)
        labels = labels.astype(np.uint8)
        return labels
Example n. 57
0
    def readFromGIAnT(self, h5file, setmaster2zero=None,
                            zfile=None, lonfile=None, latfile=None, filetype='f',
                            incidence=None, heading=None, inctype='onefloat', 
                            field='recons', keepnan=False, mask=None, readModel=False):
        '''
        Read the output from a typical GIAnT h5 output file.

        Args:
            * h5file        : Input h5file

        Kwargs:
            * setmaster2zero: If index is provided, master will be replaced by zeros (no subtraction)
            * zfile         : File with elevation 
            * lonfile       : File with longitudes 
            * latfile       : File with latitudes 
            * filetype      : type of data in lon, lat and elevation file (default: 'f')
            * incidence     : Incidence angle (degree)
            * heading       : Heading angle (degree)
            * inctype       : Type of the incidence and heading values (see insar.py for details). Can be 'onefloat', 'grd', 'binary', 'binaryfloat'
            * field         : Name of the field in the h5 file.
            * mask          : Adds a common mask to the data. mask is an array the same size as the data with nans and 1. It can also be a tuple with a key word in the h5file, a value and 'above' or 'under'
            * readModel     : Reads the model parameters

        Returns:
            * None
        '''

        # open the h5file
        h5in = h5py.File(h5file, 'r')
        self.h5in = h5in

        # Get the data
        data = h5in[field]

        # Get some sizes
        nDates = data.shape[0]
        nLines = data.shape[1]
        nCols  = data.shape[2]

        # Deal with the mask instructions
        if mask is not None:
            if type(mask) is tuple:
                key = mask[0]
                value = mask[1]
                instruction = mask[2]
                mask = np.ones((nLines, nCols))
                if instruction == 'above':
                    mask[np.where(h5in[key][:]>value)] = np.nan
                elif instruction == 'under':
                    mask[np.where(h5in[key][:]<value)] = np.nan
                else:
                    print('Unknown instruction type for masking...')
                    sys.exit(1)

        # Read Lon Lat
        if lonfile is not None:
            self.lon = np.fromfile(lonfile, dtype=filetype)
        if latfile is not None:
            self.lat = np.fromfile(latfile, dtype=filetype)

        # Compute utm
        self.x, self.y = self.ll2xy(self.lon, self.lat) 

        # Elevation
        if zfile is not None:
            self.elevation = insar('Elevation', utmzone=self.utmzone, 
                                   verbose=False, lon0=self.lon0, lat0=self.lat0,
                                   ellps=self.ellps)
            self.elevation.read_from_binary(zfile, lonfile, latfile, 
                                            incidence=None, heading=None, 
                                            remove_nan=False, remove_zeros=False, 
                                            dtype=filetype)
            self.z = self.elevation.vel

        # Get the time
        dates = h5in['dates']
        self.time = []
        for i in range(nDates):
            self.time.append(dt.datetime.fromordinal(int(dates[i])))

        # Create a list to hold the dates
        self.timeseries = []

        # Iterate over the dates
        for i in range(nDates):
            
            # Get things
            date = self.time[i]
            dat = data[i,:,:]

            # Mask?
            if mask is not None:
                dat *= mask

            # check master date
            if i == setmaster2zero:
                dat[:,:] = 0.

            # Create an insar object
            sar = insar('{} {}'.format(self.name,date.isoformat()), utmzone=self.utmzone, 
                        verbose=False, lon0=self.lon0, lat0=self.lat0, ellps=self.ellps)

            # Put thing in the insarrate object
            sar.vel = dat.flatten()
            sar.lon = self.lon
            sar.lat = self.lat
            sar.x = self.x
            sar.y = self.y

            # Things should remain None
            sar.corner = None
            sar.err = None

            # Set factor
            sar.factor = 1.0

            # Take care of the LOS
            if incidence is not None and heading is not None:
                sar.inchd2los(incidence, heading, origin=inctype)
            else:
                sar.los = np.zeros((sar.vel.shape[0], 3))

            # Store the object in the list
            self.timeseries.append(sar)

        # Keep incidence and heading
        self.incidence = incidence
        self.heading = heading
        self.inctype = inctype

        # if readModel
        if readModel:
            self.readModelFromGIAnT()

        # Make a common mask if asked
        if not keepnan:
            # Create an array
            checkNaNs = np.zeros(self.lon.shape)
            checkNaNs[:] = False
            # Trash the pixels where there is only NaNs
            for sar in self.timeseries:
                checkNaNs += np.isfinite(sar.vel)
            uu = np.flatnonzero(checkNaNs==0)
            # Keep 'em
            for sar in self.timeseries:
                sar.reject_pixel(uu)
            if zfile is not None:
                self.elevation.reject_pixel(uu)
            self.reject_pixel(uu)
        h5in.close()

        # all done
        return
Example n. 58
0
"""
Tally benchmarks
"""
import os, glob
import numpy as np

normalize = 0
np_ = []
tt = []

for d in glob.glob( '[0-9]*' ):
    prm = {}
    path = os.path.join( d, 'parameters.py' )
    exec(open( path ).read(), prm)
    np_ += [ np.prod( prm['np3'] ) ]
    t = np.fromfile( d + '/prof/8step', 'f' )
    tt += [ np.sum( t[1:-1] ) / (len(t)-2) ]

if normalize:
    tt = [ t / tt[0] for t in tt ]

print( 'time cores' )
for n, t in zip( np_, tt ):
    print( '%4.2f %d' % (t, n) )

if 0:
    import matplotlib.pyplot as plt
    n = len( tt )
    ax = plt.plot( tt, 'ko-' )[0].axes
    ax.plot( [-1, n], [tt[0],tt[0]], 'k--' )
Example n. 59
0
def _read_cells(f, line):
    # If the line is self-contained, it is merely a declaration of the total number of
    # points.
    if line.count("(") == line.count(")"):
        return None, None

    out = re.match("\\s*\\(\\s*(|20|30)12\\s*\\(([^\\)]+)\\).*", line)
    assert out is not None
    a = [int(num, 16) for num in out.group(2).split()]
    if len(a) <= 4:
        raise ReadError()
    first_index = a[1]
    last_index = a[2]
    num_cells = last_index - first_index + 1
    zone_type = a[3]
    element_type = a[4]

    if zone_type == 0:
        # dead zone
        return None, None

    key, num_nodes_per_cell = {
        0: ("mixed", None),
        1: ("triangle", 3),
        2: ("tetra", 4),
        3: ("quad", 4),
        4: ("hexahedron", 8),
        5: ("pyramid", 5),
        6: ("wedge", 6),
    }[element_type]

    # Skip to the opening `(` and make sure that there's no non-whitespace character
    # between the last closing bracket and the `(`.
    if line.strip()[-1] != "(":
        c = None
        while True:
            c = f.read(1).decode()
            if c == "(":
                break
            if not re.match("\\s", c):
                # Found a non-whitespace character before `(`.
                # Assume this is just a declaration line then and
                # skip to the closing bracket.
                _skip_to(f, ")")
                return None, None

    if key == "mixed":
        # From
        # <https://www.afs.enea.it/project/neptunius/docs/fluent/html/ug/node1470.htm>:
        #
        # > If a zone is of mixed type (element-type=0), it will have a body that
        # > lists the element type of each cell.
        #
        # No idea where the information other than the element types is stored
        # though. Skip for now.
        data = None
    else:
        # read cell data
        if out.group(1) == "":
            # ASCII cells
            data = np.empty((num_cells, num_nodes_per_cell), dtype=int)
            for k in range(num_cells):
                line = f.readline().decode()
                dat = line.split()
                if len(dat) != num_nodes_per_cell:
                    raise ReadError()
                data[k] = [int(d, 16) for d in dat]
        else:
            if key == "mixed":
                raise ReadError("Cannot read mixed cells in binary mode yet")
            # binary cells
            if out.group(1) == "20":
                dtype = np.int32
            else:
                if out.group(1) != "30":
                    ReadError(f"Expected keys '20' or '30', got {out.group(1)}.")
                dtype = np.int64
            shape = (num_cells, num_nodes_per_cell)
            count = shape[0] * shape[1]
            data = np.fromfile(f, count=count, dtype=dtype).reshape(shape)

    # make sure that the data set is properly closed
    _skip_close(f, 2)
    return key, data
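
The indices inside a Fluent "(12 ...)" cells declaration are hexadecimal, which is why the code converts them with int(num, 16); a self-contained check of that arithmetic on a made-up declaration line:

import re

line = "(12 (2 1 2d4 1 2))"  # made-up: zone 2, cells 0x1..0x2d4, active, tetra
out = re.match("\\s*\\(\\s*(|20|30)12\\s*\\(([^\\)]+)\\).*", line)
a = [int(num, 16) for num in out.group(2).split()]
first_index, last_index = a[1], a[2]
print(last_index - first_index + 1)  # 724 cells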
Example n. 60
0
def session_plot(file_path, fig_no=1, return_fig=False):
    '''Plot the states, events and analog data for a pyControl session.'''

    # Import data file.

    with open(file_path, 'r') as f:
        all_lines = [line.strip() for line in f.readlines() if line.strip()]

    # Import any analog files.

    file_dir = os.path.dirname(file_path)
    file_name = os.path.split(file_path)[1]
    analog_files = [
        f for f in os.listdir(file_dir)
        if file_name.split('.')[0] in f and f != file_name
    ]

    analog_data = {}

    for analog_file in analog_files:
        analog_name = analog_file[len(file_name.split('.')[0]) + 1:-4]
        with open(os.path.join(file_dir, analog_file), 'rb') as f:
            analog_data[analog_name] = np.fromfile(f,
                                                   dtype='<i').reshape(-1, 2)

    # Extract state entry and event times.

    states_dict = eval(next(line for line in all_lines if line[0] == 'S')[2:])
    events_dict = eval(next(line for line in all_lines if line[0] == 'E')[2:])

    ID2name = {v: k for k, v in {**states_dict, **events_dict}.items()}

    data_lines = [line[2:].split(' ') for line in all_lines if line[0] == 'D']

    event_times = np.array([
        int(dl[0]) for dl in data_lines if int(dl[1]) in events_dict.values()
    ]) / 1000
    event_IDs = np.array([
        int(dl[1]) for dl in data_lines if int(dl[1]) in events_dict.values()
    ])

    state_times = np.array([
        int(dl[0]) for dl in data_lines if int(dl[1]) in states_dict.values()
    ]) / 1000
    state_IDs = np.array([
        int(dl[1]) for dl in data_lines if int(dl[1]) in states_dict.values()
    ])

    state_durations = np.diff(state_times)

    # Plotting

    fig = plt.figure(fig_no, figsize=[18, 12])
    fig.clf()

    n_subplots = 3 if analog_data else 2

    # Plot states.

    ax1 = plt.subplot(n_subplots, 1, 1)
    plt.quiver(state_times[:-1],
               state_IDs[:-1],
               state_durations,
               np.zeros(state_durations.shape),
               state_IDs[:-1],
               cmap='gist_rainbow',
               headwidth=1,
               headlength=0,
               minlength=0,
               scale=1,
               width=10,
               units='dots',
               scale_units='xy')
    ax1.set_yticks(sorted(states_dict.values()))
    ax1.set_yticklabels([ID2name[ID] for ID in sorted(states_dict.values())])
    ax1.set_ylim(
        min(states_dict.values()) - 0.5,
        max(states_dict.values()) + 0.5)
    ax1.set_facecolor('black')

    # Plot events.

    ax2 = plt.subplot(n_subplots, 1, 2, sharex=ax1)
    plt.scatter(event_times, event_IDs, c=event_IDs, s=6, cmap='gist_rainbow')
    ax2.set_yticks(sorted(events_dict.values()))
    ax2.set_yticklabels([ID2name[ID] for ID in sorted(events_dict.values())])
    ax2.set_ylim(
        min(events_dict.values()) - 0.5,
        max(events_dict.values()) + 0.5)
    ax2.set_facecolor('black')

    # Plot analog data

    if analog_data:
        ax3 = plt.subplot(n_subplots, 1, 3, sharex=ax1)
        for name, data in analog_data.items():
            plt.plot(data[:, 0] / 1000, data[:, 1], label=name)
        ax3.set_facecolor('black')
        ax3.set_ylabel('Signal value')
        ax3.legend()

    ax1.set_xlim(0, state_times[-1])

    plt.xlabel('Time (seconds)')
    plt.tight_layout()

    if return_fig:  # Return the figure and axes
        return fig, ax1
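
Hedged usage: the session file path below is hypothetical, and a real pyControl data file (plus any matching analog files alongside it) is assumed:

import matplotlib.pyplot as plt

fig, ax1 = session_plot('data/subject1-2020-01-01.txt', return_fig=True)
plt.show()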