Example #1
    def load( collada, localscope, node ):
        indexnode = node.find(tag('p'))
        if indexnode is None: raise DaeIncompleteError('Missing index in polylist')
        vcountnode = node.find(tag('vcount'))
        if vcountnode is None: raise DaeIncompleteError('Missing vcount in polylist')

        try:
            if vcountnode.text is None:
                vcounts = numpy.array([], dtype=numpy.int32)
            else:
                vcounts = numpy.fromstring(vcountnode.text, dtype=numpy.int32, sep=' ')
            vcounts[numpy.isnan(vcounts)] = 0
        except ValueError as ex:
            raise DaeMalformedError('Corrupted vcounts in polylist')

        all_inputs = primitive.Primitive._getInputs(collada, localscope, node.findall(tag('input')))

        try:
            if indexnode.text is None:
                index = numpy.array([], dtype=numpy.int32)
            else:
                index = numpy.fromstring(indexnode.text, dtype=numpy.int32, sep=' ')
            index[numpy.isnan(index)] = 0
        except ValueError:
            raise DaeMalformedError('Corrupted index in polylist')

        polylist = Polylist(all_inputs, node.get('material'), index, vcounts, node)
        return polylist
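Note: `numpy.fromstring` in text mode (a non-empty `sep`, as above) parses delimiter-separated numbers; only the binary mode (empty `sep`) is deprecated in favor of `numpy.frombuffer`. A minimal standalone sketch of the same parse, with a made-up `<p>` payload:

    import numpy

    index_text = '0 1 2 2 3 0'  # illustrative contents of a <p> element
    index = numpy.fromstring(index_text, dtype=numpy.int32, sep=' ')
    print(index)  # [0 1 2 2 3 0]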
Example #2
0
 def __init__(self, file_name=None, samples=None, sample_width=None, time=0, word="anonymous"):
     """
     initialize WavFile object
     you can use name of file from file system
     or list of frames, sample_width and time, this variant good
         when you record audio yourself and don't want save it to disk
     @param file_name: name of file (if you use file from disk)
     @param samples: frames which represents wave file
     @param sample_width: width of samples
     @param time: length of recorded file in seconds
     """
     self.types = {1: np.int8, 2: np.int16, 4: np.int32}
     if file_name:
         if os.path.isfile(file_name):
             self.file_name = file_name
             wav = wave.open(file_name, mode="r")
             (self.number_of_channels, self.sample_width, self.frame_rate, self.number_of_frames, self.comp_type,
                 self.comp_name) = wav.getparams()
             self.file_size_sec = self.number_of_frames/self.frame_rate
             self.samples = np.fromstring(wav.readframes(self.number_of_frames), dtype=self.types[self.sample_width])
             # self.samples = [i/32768 for i in self.samples]
             wav.close()
         else:
             raise Exception("File '" + file_name + "' is not exists!")
     elif samples is not None and sample_width is not None and time > 0:
         self.file_name = word
         self.sample_width = sample_width
         self.samples = np.fromstring(samples, dtype=self.types[self.sample_width])
         self.file_size_sec = time
         self.number_of_channels = 2
     else:
         raise Exception("Wrong input data!")
Example #3
	def plotFFT(self):
		# Generates plot of the FFT output. To view, run plotFFT.py in a separate terminal
		figure1 = plt.figure(num= None, figsize=(12,12), dpi=80, facecolor='w', edgecolor='w')
		plot1 = figure1.add_subplot(111)
		line1, = plot1.plot( np.arange(0,512,0.5), np.zeros(1024), 'g-')
		plt.xlabel('freq (MHz)',fontsize = 12)
		plt.ylabel('Amplitude',fontsize = 12)
		plt.title('Pre-mixer FFT',fontsize = 12)
		plt.xticks(np.arange(0,512,50))
		plt.xlim((0,512))
		plt.grid()
		plt.show(block = False)
		count = 0 
		stop = 1.0e6
		while(count < stop):
			overflow = np.fromstring(self.fpga.read('overflow', 4), dtype = '>B')
			print(overflow)
			self.fpga.write_int('fft_snap_ctrl',0)
			self.fpga.write_int('fft_snap_ctrl',1)
			fft_snap = (np.fromstring(self.fpga.read('fft_snap_bram',(2**9)*8),dtype='>i2')).astype('float')
			I0 = fft_snap[0::4]
			Q0 = fft_snap[1::4]
			I1 = fft_snap[2::4]
			Q1 = fft_snap[3::4]
			mag0 = np.sqrt(I0**2 + Q0**2)
			mag1 = np.sqrt(I1**2 + Q1**2)
			fft_mags = np.hstack(list(zip(mag0, mag1)))
			plt.ylim((0,np.max(fft_mags) + 300.))
			line1.set_ydata((fft_mags))
			plt.draw()
			count += 1
Example #4
File: meg.py Project: B-Rich/PyMVPA
    def __init__(self, source):
        """Reader MEG data from texfiles or file-like objects.

        Parameters
        ----------
        source : str or file-like
          Strings are assumed to be filenames (gzip-compressed if they carry
          a `.gz` suffix), while all other object types are treated as
          file-like objects.
        """
        self.ntimepoints = None
        self.timepoints = None
        self.nsamples = None
        self.channelids = []
        self.data = []
        self.samplingrate = None

        # open textfiles
        if isinstance(source, str):
            if source.endswith('.gz'):
                externals.exists('gzip', raise_=True)
                import gzip
                source = gzip.open(source, 'r')
            else:
                source = open(source, 'r')

        # read file
        for line in source:
            # split ID
            colon = line.find(':')

            # ignore lines without id
            if colon == -1:
                continue

            id = line[:colon]
            data = line[colon+1:].strip()
            if id == 'Sample Number':
                timepoints = np.fromstring(data, dtype=int, sep='\t')
                # one more as it starts with zero
                self.ntimepoints = int(timepoints.max()) + 1
                self.nsamples = int(len(timepoints) / self.ntimepoints)
            elif id == 'Time':
                self.timepoints = np.fromstring(data,
                                               dtype=float,
                                               count=self.ntimepoints,
                                               sep='\t')
                self.samplingrate = self.ntimepoints \
                    / (self.timepoints[-1] - self.timepoints[0])
            else:
                # load data
                self.data.append(
                    np.fromstring(data, dtype=float, sep='\t').reshape(
                        self.nsamples, self.ntimepoints))
                # store id
                self.channelids.append(id)

        # reshape data from (channels x samples x timepoints) to
        # (samples x channels x timepoints)
        self.data = np.swapaxes(np.array(self.data), 0, 1)
Example #5
	def get_UDP(self, Npackets, LO_freq, skip_packets=2, channels = None):
		#Npackets = np.int(time_interval * self.accum_freq)
		I_buffer = np.empty((Npackets + skip_packets, len(channels)))
		Q_buffer = np.empty((Npackets + skip_packets, len(channels)))
		self.fpga.write_int('pps_start', 1)
		count = 0
		while count < Npackets + skip_packets:
			packet = self.s.recv(8192) 
			data = np.fromstring(packet,dtype = '<i').astype('float')
			data /= 2.0**17
			data /= (self.accum_len/512.)
			ts = (np.fromstring(packet[-4:],dtype = '<i').astype('float')/ self.fpga_samp_freq)*1.0e3 # ts in ms
			odd_chan = channels[1::2]
			even_chan = channels[0::2]
			I_odd = data[1024 + ((odd_chan - 1) // 2)]
			Q_odd = data[1536 + ((odd_chan - 1) // 2)]
			I_even = data[0 + (even_chan // 2)]
			Q_even = data[512 + (even_chan // 2)]
			even_phase = np.arctan2(Q_even,I_even)
			odd_phase = np.arctan2(Q_odd,I_odd)
			if len(channels) % 2 > 0:
				I = np.hstack(list(zip(I_even[:len(I_odd)], I_odd)))
				Q = np.hstack(list(zip(Q_even[:len(Q_odd)], Q_odd)))
				I = np.hstack((I, I_even[-1]))
				Q = np.hstack((Q, Q_even[-1]))
				I_buffer[count] = I
				Q_buffer[count] = Q
			else:
				I = np.hstack(list(zip(I_even, I_odd)))
				Q = np.hstack(list(zip(Q_even, Q_odd)))
				I_buffer[count] = I
				Q_buffer[count] = Q
				
			count += 1
		return I_buffer[skip_packets:],Q_buffer[skip_packets:]
Example #6
def read_wave(filename='sound.wav'):
    # Reads a wave file
    # filename: string
    # return: Wave

    fp = open_wave(filename, 'r')

    nchannels = fp.getnchannels()	# number of audio channels (1 for mono, 2 for stereo)
    nframes = fp.getnframes()		# number of audio frames
    sampwidth = fp.getsampwidth()	# sample width in bytes
    framerate = fp.getframerate()	# sampling frequency
    
    z_str = fp.readframes(nframes)	# reads and returns at most nframes of audio as a string of bytes
    
    fp.close()

    dtype_map = {1:np.int8, 2:np.int16, 3:'special', 4:np.int32}
    if sampwidth not in dtype_map:
        raise ValueError('sampwidth %d unknown' % sampwidth)
    
    if sampwidth == 3:
        xs = np.fromstring(z_str, dtype=np.int8).astype(np.int32)
        ys = (xs[2::3] * 256 + xs[1::3]) * 256 + xs[0::3]
    else:
        ys = np.fromstring(z_str, dtype=dtype_map[sampwidth])

    # if it's in stereo, just pull out the first channel
    if nchannels == 2:
        ys = ys[::2]

    #ts = np.arange(len(ys)) / framerate
    wave = Wave(ys, framerate=framerate)
    wave.normalize()

    return wave
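With a 3-byte sample width there is no native 24-bit dtype, so the loop-free reassembly above reads every byte as signed int8 and stacks the top byte (which carries the sign) over the lower two. A worked check of that arithmetic, using samples whose lower bytes stay below 0x80 (they are read as signed here):

    import numpy as np

    # three little-endian 24-bit samples: 1, 131841 (0x020301), and -65536
    z_str = bytes([0x01, 0x00, 0x00,  0x01, 0x03, 0x02,  0x00, 0x00, 0xFF])
    xs = np.frombuffer(z_str, dtype=np.int8).astype(np.int32)
    ys = (xs[2::3] * 256 + xs[1::3]) * 256 + xs[0::3]
    print(ys)  # [     1 131841 -65536]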
Example #7
	def get_stream(self, chan, time_interval):
		self.fpga.write_int('pps_start', 1)
		#self.phases = np.empty((len(self.freqs),Npackets))
		Npackets = np.int(time_interval * self.accum_freq)
		Is = np.empty(Npackets)
		Qs = np.empty(Npackets)
		phases = np.empty(Npackets)
		count = 0
		while count < Npackets:
			packet = self.s.recv(8192 + 42) # total number of bytes including 42 byte header
			header = np.fromstring(packet[:42],dtype = '<B')
			roach_mac = header[6:12]
			filter_on = np.array([2, 68, 1, 2, 13, 33])
			if np.array_equal(roach_mac,filter_on):
				data = np.fromstring(packet[42:],dtype = '<i').astype('float')
				data /= 2.0**17
				data /= (self.accum_len/512.)
				ts = (np.fromstring(packet[-4:],dtype = '<i').astype('float')/ self.fpga_samp_freq)*1.0e3 # ts in ms
				# To stream one channel, make chan an argument
				if (chan % 2) > 0:
					I = data[1024 + ((chan - 1) // 2)]
					Q = data[1536 + ((chan - 1) // 2)]
				else:
					I = data[0 + (chan // 2)]
					Q = data[512 + (chan // 2)]
				phase = np.arctan2([Q],[I])
				Is[count]=I
				Qs[count]=Q
				phases[count]=phase
			else:
				continue
			count += 1
		return Is, Qs, phases
Example #8
 def stats(self, out_counts, out_adj, adj_index = string.ascii_letters + ' '):
   """Given two input arrays this adds to them the statistics of the contained text. The first array is of length 256, and counts the instances of character codes. The second array is 2D, with ['a', 'b'] being the number of times a 'b' follows an 'a'. It is indexed by adj_index however, and character pairs that contain a character not included are not counted."""
   
   # Counts are relatively easy - convert and histogram...
   text_codes = numpy.fromstring(self.text.encode('utf8'), dtype=numpy.uint8)
   out_counts += numpy.bincount(text_codes, minlength=256)
   
   # Adjacencies require a little more sneakiness...
   # First convert the codes array into an index into adj_index; entries that are not in it map to an out-of-range value (cap)...
   adj_codes = numpy.fromstring(adj_index, dtype=numpy.uint8)
   
   cap = len(adj_index) * len(adj_index)
   conversion = numpy.empty(256, dtype=numpy.int64)
   conversion[:] = cap
   conversion[adj_codes] = numpy.arange(adj_codes.shape[0])
   
   text_codes = conversion[text_codes]
   
   # Now take adjacent pairs, and calculate the 1D index in out_adj matrix...
   pos = (text_codes[:-1] * len(adj_index)) + text_codes[1:]
   
   # Lose values that are too large - they are pairs we do not record...
   pos = pos[pos < cap]
   
   # Histogram and sum into the adjacency matrix...
   if pos.shape[0]>0:
     out_adj += numpy.bincount(pos, minlength=cap).reshape((len(adj_index),len(adj_index)))
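The counting half of the method is just an encode plus `bincount`; a standalone miniature (the text here is illustrative):

    import numpy

    text = 'abba'
    out_counts = numpy.zeros(256, dtype=numpy.int64)
    codes = numpy.frombuffer(text.encode('utf8'), dtype=numpy.uint8)
    out_counts += numpy.bincount(codes, minlength=256)
    print(out_counts[ord('a')], out_counts[ord('b')])  # 2 2

The adjacency half works the same way, after flattening each character pair to a single index as the method does above.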
Example #9
    def _read_variable_data(self, raw_data):
        """ Read the raw data and set the variable values. """

        if('real' in self.flags):
            number_of_columns = self.number_of_variables
        elif('complex' in self.flags):
            number_of_columns = 2*self.number_of_variables
        else:
            raise NotImplementedError

        if('Transient' in self.plot_name): #Tran
            number_of_columns = self.number_of_variables+1
            input_data = np.fromstring(raw_data, count=number_of_columns*self.number_of_points, dtype='float32')
            input_data = input_data.reshape((self.number_of_points, number_of_columns))
            input_data = input_data.transpose()
            time = input_data[0:2]
            tmpdata = time.transpose().flatten().tostring()
            time = np.fromstring(tmpdata, count=self.number_of_points, dtype='float64')
            time = np.absolute(time)
            input_data = input_data[1:]
            input_data[0] = time
        else:
            input_data = np.fromstring(raw_data, count=number_of_columns*self.number_of_points, dtype='float64')
            input_data = input_data.reshape((self.number_of_points, number_of_columns))
            input_data = input_data.transpose()
        #input_data = input_data [1:]
        #np.savetxt('raw.txt', input_data)
        if 'complex' in self.flags:
            raw_data = input_data
            input_data = np.array(raw_data[0::2], dtype='complex64')
            input_data.imag = raw_data[1::2]
        for variable in self.variables.values():
            variable.data = input_data[variable.index]
Example #10
def decTY1(raw_8, raw_16=None, raw_32=None):
    """
    Modified byte offset decompressor used in Oxford Diffraction images
    @param raw_8,raw_16,raw_32: strings containing raw data with integers of the given size
    @return numpy.ndarray 
    """
    data = numpy.fromstring(raw_8, dtype="uint8").astype(int)
    data -= 127
    if raw_32 is not None:
        int32 = numpy.fromstring(raw_32, dtype="int32").astype(int)
        exception32 = numpy.nonzero(data == 128)
    if raw_16 is not None:
        int16 = numpy.fromstring(raw_16, dtype="int16").astype(int)
        exception16 = numpy.nonzero(data == 127)
        data[exception16] = int16
    if raw_32 is not None:
        data[exception32] = int32
    summed = data.cumsum()
    smax = summed.max()
    if (smax > (2 ** 31 - 1)):
        bytecode = "int64"
    elif (smax > (2 ** 15 - 1)):
        bytecode = "int32"
    elif (smax > (2 ** 7 - 1)):
        bytecode = "int16"
    else:
        bytecode = "int8"
    return summed.astype(bytecode)
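The scheme stores deltas: each byte is (difference + 127), values of 127 and 128 after the subtraction flag 16- and 32-bit escape values, and cumsum rebuilds the pixel values. A toy check of the core arithmetic (delta values invented for illustration):

    import numpy

    raw = bytes([127 + 5, 127 + 1, 127 - 2])  # deltas 5, 1, -2
    deltas = numpy.frombuffer(raw, dtype='uint8').astype(int) - 127
    print(deltas.cumsum())  # [5 6 4]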
Example #11
def send(cloudstream, data, timeout = 0.25):
    sock = cloudstream.socket
    #print data.shape
    data = data.reshape(-1, 1)
    #print data.shape
    sock.send("upload")
    alive = time.time()
    while time.time() - alive < timeout:
        try:
            msg = sock.recv_multipart(flags = zmq.DONTWAIT)
            pstart, size = msg
            pstart = np.fromstring(pstart, "int32", 1)[0] * 3
            size = np.fromstring(size, "int32", 1)[0] * 3
            pend = pstart + size
            #print "sending", pstart, pend
            #print("sending", pstart, pend, data.shape[0])
            if pstart < 0:
                return True
            elif pstart <= data.shape[0]:
                #print data.shape, data.dtype, pstart, pend
                #print "subdata", data[pstart:min(data.shape[0], pend)].shape
                chunk = data[pstart:min(data.shape[0], pend)].tostring()
                #print len(chunk)
                sock.send(chunk)
            alive = time.time()
        except zmq.error.Again:
            continue
    print "Connection timeout"
    return False
Example #12
    def _read_bucket(self, doc, column_set, column_dtypes, include_symbol, include_images, columns):
        rtn = {}
        if doc[VERSION] != 3:
            raise ArcticException("Unhandled document version: %s" % doc[VERSION])
        rtn[INDEX] = np.cumsum(np.fromstring(lz4.decompress(doc[INDEX]), dtype='uint64'))
        doc_length = len(rtn[INDEX])
        rtn_length = len(rtn[INDEX])
        if include_symbol:
            rtn['SYMBOL'] = [doc[SYMBOL], ] * rtn_length
        column_set.update(doc[COLUMNS].keys())
        for c in column_set:
            try:
                coldata = doc[COLUMNS][c]
                dtype = np.dtype(coldata[DTYPE])
                values = np.fromstring(lz4.decompress(coldata[DATA]), dtype=dtype)
                self._set_or_promote_dtype(column_dtypes, c, dtype)
                rtn[c] = self._empty(rtn_length, dtype=column_dtypes[c])
                rowmask = np.unpackbits(np.fromstring(lz4.decompress(coldata[ROWMASK]),
                                                      dtype='uint8'))[:doc_length].astype('bool')
                rtn[c][rowmask] = values
            except KeyError:
                rtn[c] = None

        if include_images and doc.get(IMAGE_DOC, {}).get(IMAGE, {}):
            rtn = self._prepend_image(rtn, doc[IMAGE_DOC], rtn_length, column_dtypes, column_set, columns)
        return rtn
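The row mask packs one boolean per row into bytes, so np.unpackbits restores it before indexing; a tiny illustration (the 10-row length is arbitrary):

    import numpy as np

    packed = np.array([0b10110000, 0b11000000], dtype='uint8')
    rowmask = np.unpackbits(packed)[:10].astype('bool')
    print(rowmask)  # [ True False  True  True False False False False  True  True]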
Example #13
 def getcorr(self):
     self.u.write_int('corr0_ctrl',0)
     self.u.write_int('corr1_ctrl',0)
     self.u.write_int('corr0_ctrl',1)
     self.u.write_int('corr1_ctrl',1)
     done = False
     while not done:
         a = self.u.read_int('corr0_addr')
         b = self.u.read_int('corr1_addr')
         if a > 0 and b > 0:
             done = True
     depth = 36*3*self.nch//4
     l0 = np.fromstring(self.u.read('corr0_bram_lsb',depth*4),dtype='int16').byteswap().astype('float')
     l0 = l0[::2] + l0[1::2]*1j
     m0 = np.fromstring(self.u.read('corr0_bram_msb',depth*4),dtype='int16').byteswap().astype('float')
     m0 = m0[::2] + m0[1::2]*1j
     l1 = np.fromstring(self.u.read('corr1_bram_lsb',depth*4),dtype='int16').byteswap().astype('float')
     l1 = l1[::2] + l1[1::2]*1j
     m1 = np.fromstring(self.u.read('corr1_bram_msb',depth*4),dtype='int16').byteswap().astype('float')
     m1 = m1[::2] + m1[1::2]*1j
     
     c = np.zeros((3,36,self.nch),dtype='complex')
     for k in range(36):
         s = np.zeros((3*self.nch,),dtype='complex')
         s[0::4] = m0[k::36]
         s[1::4] = l0[k::36]
         s[2::4] = m1[k::36]
         s[3::4] = l1[k::36]
         for t in range(3):
             c[t,k,:] = s[(t*self.nch):((t+1)*self.nch)]
     return c
Example #14
def alltoallv_string(send_dict, comm=world):
    scounts = np.zeros(comm.size, dtype=np.int)
    sdispls = np.zeros(comm.size, dtype=np.int)
    stotal = 0
    for proc in range(comm.size):
        if proc in send_dict:
            data = np.fromstring(send_dict[proc],np.int8)
            scounts[proc] = data.size
            sdispls[proc] = stotal
            stotal += scounts[proc]

    rcounts = np.zeros(comm.size, dtype=np.int)
    comm.alltoallv( scounts, np.ones(comm.size, dtype=np.int), np.arange(comm.size, dtype=np.int),
                    rcounts, np.ones(comm.size, dtype=np.int), np.arange(comm.size, dtype=np.int) )
    rdispls = np.zeros(comm.size, dtype=np.int)
    rtotal = 0
    for proc in range(comm.size):
        rdispls[proc] = rtotal
        rtotal += rcounts[proc]

    sbuffer = np.zeros(stotal, dtype=np.int8)
    for proc in range(comm.size):
        sbuffer[sdispls[proc]:(sdispls[proc]+scounts[proc])] = np.fromstring(send_dict[proc],np.int8)

    rbuffer = np.zeros(rtotal, dtype=np.int8)
    comm.alltoallv(sbuffer, scounts, sdispls, rbuffer, rcounts, rdispls)

    rdict = {}
    for proc in range(comm.size):
        rdict[proc] = rbuffer[rdispls[proc]:(rdispls[proc]+rcounts[proc])].tostring()

    return rdict
Example #15
def decode_req(metadata, data, data2):
    a, b, iterations, l, w, array_dtype, l2, w2, array_dtype2 = metadata.split('|')
    syn0 = np.fromstring(data, dtype=array_dtype)
    syn0 = syn0.reshape(int(l), int(w))
    syn1 = np.fromstring(data2, dtype=array_dtype2).reshape(int(l2), int(w2))
    conf = (syn0, syn1)
    return int(a), int(b), int(iterations), conf
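For context, a hedged sketch of a matching encoder for this wire format; the field order and '|' separators are inferred from the decoder above:

    import numpy as np

    def encode_req(a, b, iterations, syn0, syn1):
        # shapes and dtypes go in the metadata string, arrays as raw bytes
        metadata = '|'.join(str(x) for x in (
            a, b, iterations,
            syn0.shape[0], syn0.shape[1], syn0.dtype.str,
            syn1.shape[0], syn1.shape[1], syn1.dtype.str))
        return metadata, syn0.tostring(), syn1.tostring()

    syn0 = np.arange(6, dtype='float64').reshape(2, 3)
    syn1 = np.ones((3, 2), dtype='float64')
    assert decode_req(*encode_req(1, 2, 10, syn0, syn1))[0] == 1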
Example #16
    def setUp(self):
        self.sequence_kinds = frozenset([
            str,
            lambda s: np.fromstring(s, dtype='|S1'),
            lambda s: np.fromstring(s, dtype=np.uint8)])

        dna_str = 'ACGTMRWSYKVHDBN.-'
        dna_comp_str = 'TGCAKYWSRMBDHVN.-'
        dna_rev_comp_str = '-.NVHDBMRSWYKACGT'
        rna_str = 'ACGUMRWSYKVHDBN.-'
        rna_comp_str = 'UGCAKYWSRMBDHVN.-'
        rna_rev_comp_str = '-.NVHDBMRSWYKACGU'
        qual = tuple(range(len(dna_str)))

        self.dna = (DNA, dna_str)
        self.rna = (RNA, rna_str)

        dna_comp = self.dna + (dna_comp_str,)
        rna_comp = self.rna + (rna_comp_str,)

        dna_comp_qual = dna_comp + (qual,)
        rna_comp_qual = rna_comp + (qual,)
        self.all_combos_comp_qual = (dna_comp_qual, rna_comp_qual)

        dna_rev_comp = self.dna + (dna_rev_comp_str,)
        rna_rev_comp = self.rna + (rna_rev_comp_str,)
        self.all_combos_rev_comp = (dna_rev_comp, rna_rev_comp)

        dna_rev_comp_qual = dna_rev_comp + (qual,)
        rna_rev_comp_qual = rna_rev_comp + (qual,)
        self.all_combos_rev_comp_qual = \
            (dna_rev_comp_qual, rna_rev_comp_qual)
Example #17
    def read_ermapper(self, ifile):
        '''Read in a DEM file and associated .ers file'''
        ers_index = ifile.find('.ers')
        if ers_index > 0:
            data_file = ifile[0:ers_index]
            header_file = ifile
        else:
            data_file = ifile
            header_file = ifile + '.ers'

        self.header = self.read_ermapper_header(header_file)

        nroflines = int(self.header['nroflines'])
        nrofcellsperlines = int(self.header['nrofcellsperline'])
        self.data = self.read_ermapper_data(data_file, offset=int(self.header['headeroffset']))
        self.data = numpy.reshape(self.data,(nroflines,nrofcellsperlines))

        longy =  numpy.fromstring(self.getHeaderParam('longitude'), sep=':')
        latty =  numpy.fromstring(self.getHeaderParam('latitude'), sep=':')

        self.deltalatitude = float(self.header['ydimension'])
        self.deltalongitude = float(self.header['xdimension'])

        if longy[0] < 0:
            self.startlongitude = longy[0]-((longy[1]/60)+(longy[2]/3600))
            self.endlongitude = self.startlongitude - int(self.header['nrofcellsperline'])*self.deltalongitude
        else:
            self.startlongitude = longy[0]+(longy[1]/60)+(longy[2]/3600)
            self.endlongitude = self.startlongitude + int(self.header['nrofcellsperline'])*self.deltalongitude
        if latty[0] < 0:
            self.startlatitude = latty[0]-((latty[1]/60)+(latty[2]/3600))
            self.endlatitude = self.startlatitude - int(self.header['nroflines'])*self.deltalatitude
        else:
            self.startlatitude = latty[0]+(latty[1]/60)+(latty[2]/3600)
            self.endlatitude = self.startlatitude + int(self.header['nroflines'])*self.deltalatitude
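The header stores coordinates as D:M:S strings, which fromstring splits on the colon; for example:

    import numpy

    dms = numpy.fromstring('149:30:00', sep=':')
    print(dms)                               # [149.  30.   0.]
    print(dms[0] + dms[1]/60 + dms[2]/3600)  # 149.5 degrees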
Example #18
 def distribute_on_root(self):
     attribute_names = self.particles.get_attribute_names_defined_in_store()
     
     values = self.particles.get_values_in_store(
         self.particles.get_all_indices_in_store(), 
         attribute_names
     )
     units = [x.unit for x in values]
     units_dump = pickle.dumps(units)
     attributes_dump = pickle.dumps(attribute_names)
     
     units_dump = numpy.fromstring(units_dump,dtype='uint8')
     attributes_dump = numpy.fromstring(attributes_dump,dtype='uint8')
     
     sendbuffer = numpy.zeros(4,  dtype='int64')
     sendbuffer[0] = self.shared_id
     sendbuffer[1] = len(self.particles)
     sendbuffer[2] = len(units_dump)
     sendbuffer[3] = len(attributes_dump)
     
     self.concurrent_processes.mpi_comm.Bcast([sendbuffer, MPI.INTEGER8], root=self.concurrent_processes.ROOT)
     
     sendbuffer = self.particles.key
     self.concurrent_processes.mpi_comm.Bcast([sendbuffer, MPI.INTEGER8], root=self.concurrent_processes.ROOT)
     
     attribute_names = self.particles.get_attribute_names_defined_in_store()
    
     self.concurrent_processes.mpi_comm.Bcast([units_dump, MPI.CHARACTER], root=self.concurrent_processes.ROOT)
     self.concurrent_processes.mpi_comm.Bcast([attributes_dump, MPI.CHARACTER], root=self.concurrent_processes.ROOT)
     
     for x, unit in zip(values, units):
         value = x.value_in(unit)
         self.concurrent_processes.mpi_comm.Bcast([value, MPI.DOUBLE], root=self.concurrent_processes.ROOT)
Example #19
def convertTableRows2np(tableRows):
    table = []
    try:
        for row in tableRows:
            tablerow = list(np.fromstring(sciNoteFix(row.strip()), sep=' '))
            if tablerow == [-1]:
                pass
            else:
                table.append(tablerow)
        if table != []:
            return np.array(table)
        else:
            # if table is empty, this is expected for the chi table, which is
            # formatted differently in the njoy output than the others.
            for row in tableRows:
                tablerow = list(np.fromstring(sciNoteFix(row[16:].strip()), sep=' '))
                if tablerow == [-1]:
                    pass
                else:
                    table.append(tablerow)
            if not table:
                print("Warning: Table conversion failure. Blank table.")
            else:
                return np.array(sum(table, []))
    except:
        # we have a str lurking in the table
        print("Warning: Failure to convert the table into a numpy array")
Example #20
    def _read_symbology_block(self, buf2):
        """ Read symbology block. """
        # Read and decode symbology header
        self.symbology_header = _unpack_from_buf(buf2, 0, SYMBOLOGY_HEADER)

        # Read radial packets
        packet_code = struct.unpack('>h', buf2[16:18])[0]
        assert packet_code in SUPPORTED_PACKET_CODES
        self.packet_header = _unpack_from_buf(buf2, 16, RADIAL_PACKET_HEADER)
        self.radial_headers = []
        nbins = self.packet_header['nbins']
        nradials = self.packet_header['nradials']
        nbytes = _unpack_from_buf(buf2, 30, RADIAL_HEADER)['nbytes']
        if packet_code == 16 and nbytes != nbins:
            nbins = nbytes  # sometimes these do not match, use nbytes
        self.raw_data = np.empty((nradials, nbins), dtype='uint8')
        pos = 30

        for radial in self.raw_data:
            radial_header = _unpack_from_buf(buf2, pos, RADIAL_HEADER)
            pos += 6
            if packet_code == 16:
                radial[:] = np.fromstring(buf2[pos:pos+nbins], '>u1')
                pos += radial_header['nbytes']
            else:
                assert packet_code == AF1F
                # decode run length encoding
                rle_size = radial_header['nbytes'] * 2
                rle = np.fromstring(buf2[pos:pos+rle_size], dtype='>u1')
                colors = np.bitwise_and(rle, 0b00001111)
                runs = np.bitwise_and(rle, 0b11110000) // 16
                radial[:] = np.repeat(colors, runs)
                pos += rle_size
            self.radial_headers.append(radial_header)
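Each RLE byte carries a 4-bit run length in the high nibble and a 4-bit color in the low nibble, and np.repeat expands the runs; a small illustration:

    import numpy as np

    rle = np.array([0x32, 0x15], dtype='>u1')  # run of 3 color 2, run of 1 color 5
    colors = np.bitwise_and(rle, 0b00001111)
    runs = np.bitwise_and(rle, 0b11110000) // 16
    print(np.repeat(colors, runs))  # [2 2 2 5]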
Example #21
def readGridBuilderSlice(filename):
    mesh = triangleMesh()
    filename = os.path.splitext(filename)[0]
    # Node coordinates
    gbXYCdtype = np.dtype('2f8')
    f = open(''.join([filename,'.xyc']))
    nNodes = struct.unpack('3i',f.read(12))[1]
    nDataBytes = struct.unpack('i',f.read(4))[0]
    data = np.fromstring(f.read(nDataBytes),gbXYCdtype)
    f.close()
    mesh.nodes.numbers = np.arange(nNodes)
    mesh.nodes.coordinates = data
    mesh.nodes.markers = np.ones([nNodes,1], dtype=int)
    
    # Element incidences
    gbIN3dtype = np.dtype('3i')
    f = open(''.join([filename,'.in3']))
    nElements = struct.unpack('3i',f.read(12))[1]
    nDataBytes = struct.unpack('i',f.read(4))[0]
    data = np.fromstring(f.read(nDataBytes),gbIN3dtype)
    mesh.elements.numbers = np.arange(nElements)
    mesh.elements.nodes = data-1
    mesh.elements.markers = np.ones([nElements,1],dtype=int)
    f.close()
    return mesh
Example #22
    def pad(self):
        """
        Pads the block to have k symbols of each symbolsize bytes.
        Each symbol will be interpreted as an array of unsigned integers
        """

        # loop through checking each symbol
        for i in xrange(len(self)):

            # Look for strings that are not numpy arrays
            if not isinstance(self[i], numpy.ndarray):

                # Figure out padding if any
                difference = self.symbolsize - len(self[i])
                self.padding += difference
                self[i] += b'\x00' * difference

                # Convert to numpy array
                self[i] = numpy.fromstring(self[i], dtype=self.dtype)

        # Add as many zero symbols as necessary to have a full block
        for i in xrange(len(self), self.k):
            src = b'\x00' * self.symbolsize
            self.padding += self.symbolsize
            array = numpy.fromstring(src, dtype=self.dtype)
            self.append(array)
Example #23
def read_MSR_depth_ims(depth_file, resize='VGA'):
	''' Extracts depth images and masks from the MSR Daily Activities dataset
	---Parameters---
	depth_file : filename for set of depth images (.bin file)
	'''

	file_ = open(depth_file, 'rb')

	''' Get header info '''
	frames = np.fromstring(file_.read(4), dtype=np.int32)[0]
	cols = np.fromstring(file_.read(4), dtype=np.int32)[0]
	rows = np.fromstring(file_.read(4), dtype=np.int32)[0]

	''' Get depth/mask image data '''
	data = file_.read()

	'''
	Depth images and mask images are stored together per row.
	Thus we need to extract each row of size n_cols+n_rows
	'''
	dt = np.dtype([('depth', np.int32, cols), ('mask', np.uint8, cols)])

	''' raw -> usable images '''
	frame_data = np.fromstring(data, dtype=dt)
	depthIms = frame_data['depth'].astype(np.uint16).reshape([frames, rows, cols])
	maskIms = frame_data['mask'].astype(np.uint16).reshape([frames, rows, cols])

	if resize == 'VGA':
		# embed()
		depthIms = np.dstack([cv2.resize(depthIms[d,:,:], (640,480)) for d in xrange(len(depthIms))])
		maskIms = np.dstack([cv2.resize(maskIms[d,:,:], (640,480)) for d in xrange(len(maskIms))])

	return depthIms, maskIms
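Because each stored row is a depth row followed by a mask row, a structured dtype lets one call split both fields at once; a toy version of the same layout with fabricated data:

    import numpy as np

    cols, rows, frames = 4, 1, 2
    dt = np.dtype([('depth', np.int32, cols), ('mask', np.uint8, cols)])
    raw = np.zeros(frames * rows, dtype=dt)
    raw['depth'] = [[10, 20, 30, 40], [50, 60, 70, 80]]
    raw['mask'] = 1
    frame_data = np.frombuffer(raw.tobytes(), dtype=dt)
    print(frame_data['depth'].reshape(frames, rows, cols))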
Example #24
def _read_data(record, start, end, info):
    """Read the binary data for each signal"""
    datfile = record + '.dat'
    samp_to_read = end - start

    # verify against first value in header
    with open(datfile, 'rb') as f:
        data = _arr_to_data(numpy.fromstring(f.read(3),
                        dtype=numpy.uint8).reshape(1,3))

    if [data[0, 2], data[0, 3]] != info['first_values']:
        warnings.warn(
            'First value from dat file does not match value in header')
    
    # read into an array with 3 bytes in each row
    with open(datfile, 'rb') as f:
        f.seek(start*3)
        arr = numpy.fromstring(f.read(3*samp_to_read),
                dtype=numpy.uint8).reshape((samp_to_read, 3))

    data = _arr_to_data(arr)

    # adjust zerovalue and gain
    data[:, 2] = (data[:, 2] - info['zero_values'][0]) / info['gains'][0]
    data[:, 3] = (data[:, 3] - info['zero_values'][1]) / info['gains'][1]

    # time columns
    data[:, 0] = numpy.arange(start, end)  # elapsed time in samples
    data[:, 1] = (numpy.arange(samp_to_read) + start
                  ) / info['samp_freq'] # in sec
    return data
Example #25
    def read(self, fname, dim1, dim2, offset=0, bytecode="int32", endian="<"):
        """ 
        Read a binary image
        Parameters : fname, dim1, dim2, offset, bytecode, endian
        fname : file name : str
        dim1,dim2 : image dimensions : int
        offset : size of the : int 
        bytecode among : "int8","int16","int32","int64","uint8","uint16","uint32","uint64","float32","float64",...
        endian among short or long endian ("<" or ">")
        """
        self.filename = fname
        self.dim1 = dim1
        self.dim2 = dim2
        self.bytecode = bytecode
        f = open(self.filename, "rb")
        dims = [dim2, dim1]
        bpp = len(numpy.array(0, bytecode).tostring())
        size = dims[0] * dims[1] * bpp

        f.seek(offset)
        rawData = f.read(size)
        if  self.swap_needed(endian):
            data = numpy.fromstring(rawData, bytecode).byteswap().reshape(tuple(dims))
        else:
            data = numpy.fromstring(rawData, bytecode).reshape(tuple(dims))
        self.data = data
        return self
Example #26
def _triage_write(key, value, root, comp_kw):
    if isinstance(value, dict):
        sub_root = _create_titled_group(root, key, 'dict')
        for key, sub_value in value.items():
            if not isinstance(key, string_types):
                raise TypeError('All dict keys must be strings')
            _triage_write('key_{0}'.format(key), sub_value, sub_root, comp_kw)
    elif isinstance(value, (list, tuple)):
        title = 'list' if isinstance(value, list) else 'tuple'
        sub_root = _create_titled_group(root, key, title)
        for vi, sub_value in enumerate(value):
            _triage_write('idx_{0}'.format(vi), sub_value, sub_root, comp_kw)
    elif isinstance(value, type(None)):
        _create_titled_dataset(root, key, 'None', [False])
    elif isinstance(value, (int, float)):
        if isinstance(value, int):
            title = 'int'
        else:  # isinstance(value, float):
            title = 'float'
        _create_titled_dataset(root, key, title, np.atleast_1d(value))
    elif isinstance(value, string_types):
        if isinstance(value, text_type):  # unicode
            value = np.fromstring(value.encode('utf-8'), np.uint8)
            title = 'unicode'
        else:
            value = np.fromstring(value.encode('ASCII'), np.uint8)
            title = 'ascii'
        _create_titled_dataset(root, key, title, value, comp_kw)
    elif isinstance(value, np.ndarray):
        _create_titled_dataset(root, key, 'ndarray', value)
    else:
        raise TypeError('unsupported type %s' % type(value))
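The string branches store text as a uint8 array; a quick standalone round-trip check of that encoding (tobytes is the inverse):

    import numpy as np

    value = 'héllo'
    arr = np.frombuffer(value.encode('utf-8'), np.uint8)
    print(arr.tobytes().decode('utf-8'))  # héllo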
Example #27
def EMC_ReadData(filePath) :
    """ USAGE:
        reads a CSR sparse matrix from file and converts it
        to a Matrix library object in a CSR format
    
        PARAMETERS:
        filePath - full/relative path to the data file
    """
    
    # open file for reading
    inFile = open(filePath, "r")

    # read matrix shape
    matrixShape = numpy.fromstring(inFile.readline(), dtype = 'int', sep = ',')

    # read matrix data, indices and indptr
    data = numpy.fromstring(inFile.readline(), dtype = 'float', sep = ',')
    indices = numpy.fromstring(inFile.readline(), dtype = 'int', sep = ',')
    indptr = numpy.fromstring(inFile.readline(), dtype = 'int', sep = ',')

    # close file
    inFile.close()

    return sparse.csr_matrix((data, indices, indptr),
                             shape = (matrixShape[0], matrixShape[1]))
Example #28
def read_wave(filename='sound.wav'):
    """Reads a wave file.

    filename: string

    returns: Wave
    """
    fp = open_wave(filename, 'r')

    nchannels = fp.getnchannels()
    nframes = fp.getnframes()
    sampwidth = fp.getsampwidth()
    framerate = fp.getframerate()
    
    z_str = fp.readframes(nframes)
    
    fp.close()

    dtype_map = {1:numpy.int8, 2:numpy.int16, 3:'special', 4:numpy.int32}
    if sampwidth not in dtype_map:
        raise ValueError('sampwidth %d unknown' % sampwidth)
    
    if sampwidth == 3:
        xs = numpy.fromstring(z_str, dtype=numpy.int8).astype(numpy.int32)
        ys = (xs[2::3] * 256 + xs[1::3]) * 256 + xs[0::3]
    else:
        ys = numpy.fromstring(z_str, dtype=dtype_map[sampwidth])

    # if it's in stereo, just pull out the first channel
    if nchannels == 2:
        ys = ys[::2]

    wave = Wave(ys, framerate)
    return wave
Example #29
def ReadWav(filename):
  wav = None
  try:
    wav = wave.open(filename, 'r')
    channels = wav.getnchannels()
    sample_bytes = wav.getsampwidth()

    wav_data = wav.readframes(wav.getnframes())

    if (sample_bytes == 2):
      wav_array = numpy.fromstring(wav_data, dtype=numpy.int16)
    elif (sample_bytes == 1):
      wav_array = numpy.fromstring(wav_data, dtype=numpy.int8)
    else:
      raise ValueError('Sample depth of %d bytes not supported' % sample_bytes)

    float_array = numpy.zeros(wav_array.shape[0] // channels)
    for i in range(channels):
      float_array += wav_array[i::channels]
    float_array /= max(abs(float_array))
  finally:
    if wav:
      wav.close()

  return float_array
Example #30
def faces_load_data():
 
    skip_rows = 1
    train_size = 28709
    test_size = 3589
    dim = 48
    X_train = np.empty([train_size,dim, dim])
    X_test = np.empty([test_size, dim, dim])
    y_train = np.empty(train_size)
    y_test = np.empty(test_size)
    
    f = open('fer2013/fer2013.csv', 'r')
 
    train_index = test_index = 0
    for i, line in enumerate(f):
        if i >= skip_rows:
            split_line = line.split(",")
            usage = split_line[2].rstrip()
            if usage == 'Training':
                X_train[train_index, :,:] = np.fromstring(split_line[1], dtype = 'int', sep = ' ').reshape(dim, dim)
                y_train[train_index] = int(split_line[0])
                train_index += 1
            elif usage == 'PublicTest':
                X_test[test_index, :,:] = np.fromstring(split_line[1], dtype = 'int', sep = ' ').reshape(dim, dim)
                y_test[test_index] = int(split_line[0])
                test_index += 1
                 
    return (X_train, y_train) , (X_test, y_test)
Example #31
def plotFamilies(rtable, ftable, ctable, opt):
    """
    Creates a multi-paneled family plot.
    
    rtable: Repeater table
    ftable: Families table
    ctable: Correlation table
    opt: Options object describing station/run parameters
    
    Top row: Ordered waveforms
    Middle row: Timeline of amplitude
    Bottom row: Timeline of event spacing 
    """

    # Adjust the font face
    matplotlib.rcParams['font.family'] = 'sans-serif'
    matplotlib.rcParams['font.sans-serif'] = ['Arial']
    matplotlib.rcParams['font.size'] = 8.0

    # Load into memory
    startTimeMPL = rtable.cols.startTimeMPL[:]
    startTime = rtable.cols.startTime[:]
    windowAmp = rtable.cols.windowAmp[:][:, opt.printsta]
    windowStart = rtable.cols.windowStart[:]
    fi = rtable.cols.FI[:]
    ids = rtable.cols.id[:]
    id1 = ctable.cols.id1[:]
    id2 = ctable.cols.id2[:]
    ccc = ctable.cols.ccc[:]

    # Station names
    stas = opt.station.split(',')
    chas = opt.channel.split(',')

    for cnum in range(ftable.attrs.nClust):

        fam = np.fromstring(ftable[cnum]['members'], dtype=int, sep=' ')
        core = ftable[cnum]['core']

        # Prep catalog
        catalogind = np.argsort(startTimeMPL[fam])
        catalog = startTimeMPL[fam][catalogind]
        longevity = ftable[cnum]['longevity']
        spacing = np.diff(catalog) * 24
        minind = fam[catalogind[0]]
        maxind = fam[catalogind[-1]]

        if ftable.cols.printme[cnum] != 0:

            fig = plt.figure(figsize=(10, 12))

            # Plot waveforms
            ax1 = fig.add_subplot(9, 3, (1, 8))

            # If only one station, plot all aligned waveforms
            if opt.nsta == 1:

                famtable = rtable[fam]
                n = -1
                data = np.zeros((len(fam), int(opt.winlen * 2)))
                for r in famtable:
                    n = n + 1
                    waveform = r['waveform'][0:opt.wshape]
                    tmp = waveform[max(
                        0, windowStart[fam[n]] - int(opt.ptrig * opt.samprate)
                    ):min(opt.wshape, windowStart[fam[n]] +
                          int(opt.atrig * opt.samprate))]
                    data[n, :] = tmp[int(opt.ptrig * opt.samprate -
                                         opt.winlen *
                                         0.5):int(opt.ptrig * opt.samprate +
                                                  opt.winlen *
                                                  1.5)] / windowAmp[fam[n]]
                if len(fam) > 12:
                    ax1.imshow(data,
                               aspect='auto',
                               vmin=-1,
                               vmax=1,
                               cmap='RdBu',
                               interpolation='nearest',
                               extent=[
                                   -1 * opt.winlen * 0.5 / opt.samprate,
                                   opt.winlen * 1.5 / opt.samprate, n + 0.5,
                                   -0.5
                               ])
                    tvec = [
                        -1 * opt.winlen * 0.5 / opt.samprate,
                        opt.winlen * 1.5 / opt.samprate
                    ]
                else:
                    tvec = np.arange(-opt.winlen * 0.5 / opt.samprate,
                                     opt.winlen * 1.5 / opt.samprate,
                                     1 / opt.samprate)
                    for o in range(len(fam)):
                        dat = data[o, :]
                        dat[dat > 1] = 1
                        dat[dat < -1] = -1
                        ax1.plot(tvec, dat / 2 - o, 'k', linewidth=0.25)

            # Otherwise, plot cores and stacks from all stations
            else:

                r = rtable[core]
                famtable = rtable[fam]
                tvec = np.arange(-opt.winlen * 0.5 / opt.samprate,
                                 opt.winlen * 1.5 / opt.samprate,
                                 1 / opt.samprate)
                for s in range(opt.nsta):

                    dats = np.zeros((int(opt.winlen * 2), ))
                    waveform = famtable['waveform'][:, s * opt.wshape:(s + 1) *
                                                    opt.wshape]
                    for n in range(len(fam)):
                        tmps = waveform[
                            n,
                            max(
                                0, windowStart[fam[n]] -
                                int(opt.ptrig * opt.samprate)):min(
                                    opt.wshape, windowStart[fam[n]] +
                                    int(opt.atrig * opt.samprate)
                                )] / (famtable['windowAmp'][n, s] + 1.0 / 1000)
                        tmps[tmps > 1] = 1
                        tmps[tmps < -1] = -1
                        try:
                            dats = dats + tmps[int(opt.ptrig * opt.samprate -
                                                   opt.winlen *
                                                   0.5):int(opt.ptrig *
                                                            opt.samprate +
                                                            opt.winlen * 1.5)]
                        except ValueError:
                            pass
                    dats = dats / (max(dats) + 1.0 / 1000)
                    dats[dats > 1] = 1
                    dats[dats < -1] = -1
                    ax1.plot(tvec, dats - 1.75 * s, 'r', linewidth=1)

                    waveformc = r['waveform'][s * opt.wshape:(s + 1) *
                                              opt.wshape]
                    tmpc = waveformc[max(
                        0, r['windowStart'] - int(opt.ptrig * opt.samprate)
                    ):min(opt.wshape, r['windowStart'] +
                          int(opt.atrig * opt.samprate))]
                    datc = tmpc[int(opt.ptrig * opt.samprate - opt.winlen * 0.5
                                    ):int(opt.ptrig * opt.samprate +
                                          opt.winlen * 1.5)] / (
                                              r['windowAmp'][s] + 1.0 / 1000)
                    datc[datc > 1] = 1
                    datc[datc < -1] = -1
                    ax1.plot(tvec, datc - 1.75 * s, 'k', linewidth=0.25)
                    ax1.text(np.min(tvec) - 0.1,
                             -1.75 * s,
                             '{0}\n{1}'.format(stas[s], chas[s]),
                             horizontalalignment='right',
                             verticalalignment='center')

            ax1.axvline(x=-0.1 * opt.winlen / opt.samprate,
                        color='k',
                        ls='dotted')
            ax1.axvline(x=0.9 * opt.winlen / opt.samprate,
                        color='k',
                        ls='dotted')
            ax1.get_yaxis().set_visible(False)
            ax1.set_xlim((np.min(tvec), np.max(tvec)))
            if opt.nsta > 1:
                ax1.set_ylim((-1.75 * s - 1, 1))
            ax1.set_xlabel('Time Relative to Trigger (sec)')

            # Plot mean FFT
            ax2 = fig.add_subplot(9, 3, (3, 9))
            ax2.set_xlabel('Frequency (Hz)')
            ax2.get_yaxis().set_visible(False)
            r = rtable[core]
            famtable = rtable[fam]
            freq = np.linspace(0, opt.samprate / 2, opt.winlen / 2)
            fftc = np.zeros((opt.winlen / 2, ))
            fftm = np.zeros((opt.winlen / 2, ))
            for s in range(opt.nsta):
                fft = np.abs(
                    np.real(r['windowFFT'][s * opt.winlen:s * opt.winlen +
                                           opt.winlen / 2]))
                fft = fft / (np.amax(fft) + 1.0 / 1000)
                fftc = fftc + fft
                ffts = np.mean(np.abs(
                    np.real(famtable['windowFFT'][:, s *
                                                  opt.winlen:s * opt.winlen +
                                                  opt.winlen / 2])),
                               axis=0)
                fftm = fftm + ffts / (np.amax(ffts) + 1.0 / 1000)
            ax2.plot(freq, fftm, 'r', linewidth=1)
            ax2.plot(freq, fftc, 'k', linewidth=0.25)
            ax2.set_xlim(0, opt.fmax * 1.5)

            # Plot amplitude timeline
            ax3 = fig.add_subplot(9, 3, (10, 15))
            ax3.plot_date(catalog,
                          windowAmp[fam],
                          'ro',
                          alpha=0.5,
                          markeredgecolor='r',
                          markeredgewidth=0.5)
            myFmt = matplotlib.dates.DateFormatter('%Y-%m-%d\n%H:%M')
            ax3.xaxis.set_major_formatter(myFmt)
            ax3.set_ylim(1,
                         max(rtable.cols.windowAmp[:][:, opt.printsta]) + 500)
            ax3.margins(0.05)
            ax3.set_ylabel('Amplitude (Counts)')
            ax3.set_yscale('log')

            # Plot spacing timeline
            ax4 = fig.add_subplot(9, 3, (16, 21))
            ax4.plot_date(catalog[1:],
                          spacing,
                          'ro',
                          alpha=0.5,
                          markeredgecolor='r',
                          markeredgewidth=0.5)
            myFmt = matplotlib.dates.DateFormatter('%Y-%m-%d\n%H:%M')
            ax4.xaxis.set_major_formatter(myFmt)
            ax4.set_xlim(ax3.get_xlim())
            ax4.set_ylim(1e-3, max(spacing) * 2)
            ax4.margins(0.05)
            ax4.set_ylabel('Time since previous event (hours)')
            ax4.set_xlabel('Date')
            ax4.set_yscale('log')

            # Plot correlation timeline
            idf = ids[fam]
            ix = np.where(np.in1d(id2, idf))
            r = np.zeros((max(idf) + 1, )).astype('int')
            r[idf] = range(len(idf))
            C = np.zeros((len(idf), len(idf)))
            C[r[id2[ix]], r[id1[ix]]] = ccc[ix]
            C[r[id1[ix]], r[id2[ix]]] = ccc[ix]
            C[range(len(idf)), range(len(idf))] = 1.0

            ax5 = fig.add_subplot(9, 3, (22, 27))
            ax5.plot_date(catalog,
                          C[np.argmax(np.sum(C, 0)), :],
                          'ro',
                          alpha=0.5,
                          markeredgecolor='r',
                          markeredgewidth=0.5)
            ax5.plot_date(catalog,
                          C[np.argmax(np.sum(C, 0)), :] + opt.cmin,
                          'wo',
                          alpha=0.5,
                          markeredgecolor='r',
                          markeredgewidth=0.5)
            myFmt = matplotlib.dates.DateFormatter('%Y-%m-%d\n%H:%M')
            ax5.xaxis.set_major_formatter(myFmt)
            ax5.set_xlim(ax3.get_xlim())
            ax5.set_ylim(opt.cmin - 0.02, 1.02)
            ax5.margins(0.05)
            ax5.set_ylabel('Cross-correlation coefficient')
            ax5.set_xlabel('Date')

            plt.tight_layout()
            plt.savefig('{0}/clusters/fam{1}.png'.format(opt.groupName, cnum),
                        dpi=100)
            plt.close(fig)

        if ftable.cols.printme[cnum] != 0 or ftable.cols.lastprint[
                cnum] != cnum:
            if cnum > 0:
                prev = "<a href='{0}.html'>&lt; Cluster {0}</a>".format(cnum -
                                                                        1)
            else:
                prev = " "
            if cnum < len(ftable) - 1:
                next = "<a href='{0}.html'>Cluster {0} &gt;</a>".format(cnum +
                                                                        1)
            else:
                next = " "
            # Now write a simple HTML file to show image and catalog
            with open('{0}/clusters/{1}.html'.format(opt.groupName, cnum),
                      'w') as f:
                f.write("""
                <html><head><title>{1} - Cluster {0}</title>
                </head><style>
                a {{color:red;}}
                body {{font-family:Helvetica; font-size:12px}}
                h1 {{font-size: 20px;}}
                </style>
                <body><center>
                {10} &nbsp; | &nbsp; {11}</br>             
                <h1>Cluster {0}</h1>                
                <img src="{0}.png" width=500 height=100></br></br>
                    Number of events: {2}</br>
                    Longevity: {5:.2f} days</br>
                    Mean event spacing: {7:.2f} hours</br>
                    Median event spacing: {8:.2f} hours</br>
                    Mean Frequency Index: {9:.2f}<br></br>
                    First event: {3}</br>
                    Core event: {6}</br>
                    Last event: {4}</br>
                <img src="fam{0}.png"></br>                
                """.format(cnum, opt.title, len(fam),
                           (UTCDateTime(startTime[minind]) +
                            windowStart[minind] / opt.samprate).isoformat(),
                           (UTCDateTime(startTime[maxind]) +
                            windowStart[maxind] / opt.samprate).isoformat(),
                           longevity,
                           (UTCDateTime(startTime[core]) +
                            windowStart[core] / opt.samprate).isoformat(),
                           np.mean(spacing), np.median(spacing),
                           np.mean(np.nanmean(fi[fam], axis=1)), prev, next))

                if opt.checkComCat:
                    checkComCat(rtable, ftable, cnum, f, startTime,
                                windowStart, opt)

                f.write("""
                </center></body></html>
                """)
Example #32
# Pitch
tolerance = 0.8
downsample = 1
win_s = 4096 // downsample # fft size
hop_s = 1024  // downsample # hop size
pitch_o = pitch("yin", win_s, hop_s, RATE)
pitch_o.set_unit("midi")
pitch_o.set_tolerance(tolerance)


for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
    buffer = stream.read(CHUNK)
    frames.append(buffer)

    signal = np.fromstring(buffer, dtype=np.float32)

    pitch = pitch_o(signal)[0]
    confidence = pitch_o.get_confidence()

    print("{} / {}".format(pitch,confidence))


print("* done recording")

stream.stop_stream()
stream.close()
p.terminate()

wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
Example #33
def ReadPdb(pdb_file):
    '''
    Get Chains, Chain type and coords of proteins from pdb file 

    Parameters
    ----------
    pdb_file : string
    
    Returns
    -------
    chain_dict: {chain, type, coords}

    '''
#    pdb_file = "P:\\My Documents\\MasterProjectBert\\ChromatinMC\\data\\1KX5.pdb"
    pdb_file = "PDBs\\1KX5.pdb"

    f = open(pdb_file,'r')
    pdb = f.readlines()
    f.close()

    cpd = [l for l in pdb if l[0:6] == "COMPND"]
    chains=[]
    molecules=[]
    keywords = ['DNA', 'H2A', 'H2B', 'H3', 'H4']
    for l in cpd:
        s =l[11:].split(':')
        if s[0] == 'MOLECULE':
            for k in keywords:
                if s[1].find(k) >= 0: 
                    if k in chains:
                        chains.append(k + '*')
                    else: chains.append(k)
        if s[0] == 'CHAIN':
            s=s[1].split(';')[0].split(',')
            i=0
            for m in s:
                molecules.append(m.lstrip())
                if i > 0: 
                    chains.append(chains[-1]+'*')
                i+=1

    chain_dict = dict([(c,['','', np.zeros((1,3)), np.zeros(1)]) for c in chains])
    i = 0
    for c in chains:
        chain_dict[c][0] = molecules[i]
        i += 1
    chain_dict.pop('DNA*')
    
    # Use SEQRES for sequence 
    seq = [l for l in pdb if l[0:6] == "SEQRES"]
    for l in seq:
        for i in chain_dict:
            if chain_dict[i][0] == l[11]:
                chain_dict[i][1] += l[19:71]

    for i in chain_dict:
        chain_dict[i][1] = seq3_to_1(chain_dict[i][1])
        chain_dict[i][2] = np.zeros([len(chain_dict[i][1]),3])
    #removed /0 from "chain_dict[i][2] = np.zeros([len(chain_dict[i][1]),3])/0"
    #-bert
    
    # Grab all CA from ATOM entries
    # Grab B -factor for Phosphates
    B_factor = np.zeros(len(chain_dict['DNA'][2])-1)
    P_I = np.zeros((len(chain_dict['DNA'][2])-1,3))

    
    start_i = None  
    for l in pdb:
        if l[0:6] == "ATOM  " and l[13:16] == "CA ":
            nc = np.fromstring(l[30:55], sep = ' ')
            for i in chain_dict:
                if chain_dict[i][0] == l[21]:
#                    print int(l[22:26]
                    chain_dict[i][2][int(l[22:26])-1,:] = nc         
        if l[0:6] == "ATOM  " and l[13:16] == "P  ":
            if start_i is None:
                start_i = int(l[22:27])
            B_factor[int(l[22:27])-start_i] += float(l[61:66])
            if l[21]=='I':
                P_I[int(l[22:27])-start_i] = np.fromstring(l[30:55], sep = ' ')

#    chain_dict['DNA'][3]=B_factor
    av_I = np.mean(P_I, axis = 0)

    dI = np.sqrt(np.sum((P_I-av_I)**2, axis = 1))
    chain_dict['DNA'][3]= dI
#    plt.plot(np.arange(len(dI)), dI)
#
#    plt.plot(np.arange(len(B_factor)), B_factor)
#    plt.show()
    return chain_dict
Example #34
 def get_sample(self, filename, iscolor = cv.CV_LOAD_IMAGE_COLOR):
     if not filename in self.image_cache:
         filedata = urllib.urlopen("https://raw.github.com/Itseez/opencv/master/" + filename).read()
         self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor)
     return self.image_cache[filename]
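np.frombuffer is the non-deprecated equivalent here as well; a hedged sketch of the same decode step, where filedata stands for any bytes object holding an encoded image:

    import cv2
    import numpy as np

    def decode_image(filedata, flags=cv2.IMREAD_COLOR):
        # wrap the raw bytes without copying, then let OpenCV decode them
        buf = np.frombuffer(filedata, dtype=np.uint8)
        return cv2.imdecode(buf, flags)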
Example #35
        if event.type == KEYUP:
            return STOP

    return NO_ACTION

while True:
    # Accepts connection
    (connectionSocket, clientAddress) = serverSocket.accept()
    print('Connection requested from', clientAddress)        
    try:
        while True:
            # Get message
            length = recvall(connectionSocket, 16)
            print('image size:', int(length), 'bytes')
            stringData = recvall(connectionSocket, int(length))
            data = np.fromstring(stringData, dtype='uint8')

            # Sends response
            actionMessage = getAction()
            connectionSocket.send(actionMessage.encode().ljust(ACTION_LENGTH))
            # If action is not NO_ACTION, print the action on the screen
            if actionMessage != NO_ACTION:
                text = myfont.render(actionMessage, False, WHITE)
                screen.fill(BLACK)
                screen.blit(text, (10,1))

            # Shows the image
            # flips around the y-axis
            encodeStart = time.time()
            decodedImage = cv2.imdecode(data,1)
            frame = np.flipud(np.rot90(cv2.cvtColor(decodedImage, cv2.COLOR_BGR2RGB)))
Example #36
def checkComCat(rtable, ftable, cnum, f, startTime, windowStart, opt):
    """
    Checks repeater trigger times with projected arrival times from ANSS Comprehensive
    Earthquake Catalog (ComCat) and writes these to HTML and image files. Will also
    check NCEDC catalog if location is near Northern California.
    
    rtable: Repeater table
    ftable: Families table
    cnum: cluster number to check
    f: HTML file to write to
    startTime: startTime column from rtable (convenience)
    windowStart: windowStart column from rtable (convenience)
    opt: Options object describing station/run parameters
    
    Traces through iasp91 global velocity model; checks for local, regional, and
    teleseismic matches for limited set of phase arrivals
    """

    pc = ['Potential', 'Conflicting']
    model = TauPyModel(model="iasp91")
    mc = 0
    n = 0
    l = 0
    stalats = np.array(opt.stalats.split(',')).astype(float)
    stalons = np.array(opt.stalons.split(',')).astype(float)
    latc = np.mean(stalats)
    lonc = np.mean(stalons)

    members = np.fromstring(ftable[cnum]['members'], dtype=int, sep=' ')
    order = np.argsort(startTime[members])
    f.write('</br><b>ComCat matches:</b></br>')

    for m in members[order]:
        t = UTCDateTime(startTime[m]) + windowStart[m] / opt.samprate
        cc_url = ('http://earthquake.usgs.gov/fdsnws/event/1/query?'
                  'starttime={}&endtime={}&format=text').format(
                      t - 1800, t + 30)
        try:
            comcat = pd.read_csv(cc_url, delimiter='|')
            otime = comcat['Time'].tolist()
            lat = comcat['Latitude'].tolist()
            lon = comcat['Longitude'].tolist()
            dep = comcat['Depth/km'].tolist()
            mag = comcat['Magnitude'].tolist()
            place = comcat['EventLocationName'].tolist()
        except urllib2.HTTPError:
            otime = []
            lat = []
            lon = []
            dep = []
            mag = []
            place = []

        # Check if near Northern California, then go to NCEDC for additional events but
        # for shorter time interval
        if latc > 34 and latc < 42 and lonc > -124 and lonc < -116:
            cc_urlnc = ('http://ncedc.org/fdsnws/event/1/query?'
                        'starttime={}&endtime={}&format=text').format(
                            (t - 60).isoformat(), (t + 30).isoformat())
            try:
                ncedc = pd.read_csv(cc_urlnc, delimiter='|')
                otime.extend(ncedc[' Time '].tolist())
                lat.extend(ncedc[' Latitude '].tolist())
                lon.extend(ncedc[' Longitude '].tolist())
                dep.extend(ncedc[' Depth/km '].tolist())
                mag.extend(ncedc[' Magnitude '].tolist())
                place.extend(ncedc[' EventLocationName'].tolist())
            except ValueError:
                pass

        n0 = 0
        for c in range(len(otime)):
            deg = locations2degrees(lat[c], lon[c], latc, lonc)
            dt = t - UTCDateTime(otime[c])

            if deg <= opt.locdeg:
                mc += 1
                if np.remainder(mc, 100) == 0:
                    model = TauPyModel(model="iasp91")
                arrivals = model.get_travel_times(
                    source_depth_in_km=max(0, dep[c]),
                    distance_in_degree=deg,
                    phase_list=['p', 's', 'P', 'S'])
                if len(arrivals) > 0:
                    pt = np.zeros((len(arrivals), ))
                    pname = []
                    for a in range(len(arrivals)):
                        pt[a] = arrivals[a].time - dt
                        pname.append(arrivals[a].name)
                    if np.min(abs(pt)) < opt.serr:
                        amin = np.argmin(abs(pt))
                        f.write(
                            ('{} local match: {} ({}, {}) {}km M{} {} - ({}) '
                             '{:4.2f} s</br>').format(pc[n0], otime[c], lat[c],
                                                      lon[c], dep[c], mag[c],
                                                      place[c], pname[amin],
                                                      pt[amin]))
                        n0 = 1
                        l = l + 1
                        if l == 1:
                            llats = np.array(lat[c])
                            llons = np.array(lon[c])
                            ldeps = np.array(dep[c])
                        else:
                            llats = np.append(llats, lat[c])
                            llons = np.append(llons, lon[c])
                            ldeps = np.append(ldeps, dep[c])
            elif deg <= opt.regdeg and mag[c] >= opt.regmag:
                mc += 1
                if np.remainder(mc, 100) == 0:
                    model = TauPyModel(model="iasp91")
                arrivals = model.get_travel_times(
                    source_depth_in_km=max(0, dep[c]),
                    distance_in_degree=deg,
                    phase_list=['p', 's', 'P', 'S', 'PP', 'SS'])
                if len(arrivals) > 0:
                    pt = np.zeros((len(arrivals), ))
                    pname = []
                    for a in range(len(arrivals)):
                        pt[a] = arrivals[a].time - dt
                        pname.append(arrivals[a].name)
                    if np.min(abs(pt)) < opt.serr:
                        amin = np.argmin(abs(pt))
                        f.write((
                            '{} regional match: {} ({}, {}) {}km M{} {} - ({}) '
                            '{:4.2f} s</br>').format(pc[n0], otime[c], lat[c],
                                                     lon[c], dep[c], mag[c],
                                                     place[c], pname[amin],
                                                     pt[amin]))
                        n0 = 1
            elif deg > opt.regdeg and mag[c] >= opt.telemag:
                mc += 1
                if np.remainder(mc, 100) == 0:
                    model = TauPyModel(model="iasp91")
                arrivals = model.get_travel_times(
                    source_depth_in_km=max(0, dep[c]),
                    distance_in_degree=deg,
                    phase_list=[
                        'P', 'S', 'PP', 'SS', 'PcP', 'ScS', 'PKiKP', 'PKIKP'
                    ])
                if len(arrivals) > 0:
                    pt = np.zeros((len(arrivals), ))
                    pname = []
                    for a in range(len(arrivals)):
                        pt[a] = arrivals[a].time - dt
                        pname.append(arrivals[a].name)
                    if np.min(abs(pt)) < opt.serr:
                        amin = np.argmin(abs(pt))
                        f.write((
                            '{} teleseismic match: {} ({}, {}) {}km M{} {} - ({}) '
                            '{:4.2f} s</br>').format(pc[n0], otime[c], lat[c],
                                                     lon[c], dep[c], mag[c],
                                                     place[c], pname[amin],
                                                     pt[amin]))
                        n0 = 1
        if n0 > 1:
            n = n + 1
        else:
            n = n + n0
    if n > 0:
        f.write('Total potential matches: {}</br>'.format(n))
        f.write('Potential local matches: {}</br>'.format(l))
        if l > 0:
            m = Basemap(llcrnrlon=lonc - 2 * opt.locdeg,
                        llcrnrlat=latc - opt.locdeg,
                        urcrnrlon=lonc + 2 * opt.locdeg,
                        urcrnrlat=latc + opt.locdeg,
                        resolution='l',
                        projection='tmerc',
                        lon_0=lonc,
                        lat_0=latc)
            m.scatter(llons,
                      llats,
                      s=5,
                      alpha=0.5,
                      marker='o',
                      color='r',
                      latlon=True)
            m.scatter(stalons,
                      stalats,
                      marker='^',
                      color='k',
                      facecolors='None',
                      latlon=True)
            m.drawparallels(np.arange(np.floor(latc - opt.locdeg),
                                      np.ceil(latc + opt.locdeg),
                                      opt.locdeg / 2),
                            labels=[1, 0, 0, 0])
            m.drawmeridians(np.arange(np.floor(lonc - 2 * opt.locdeg),
                                      np.ceil(lonc + 2 * opt.locdeg),
                                      opt.locdeg),
                            labels=[0, 0, 0, 1])
            m.drawmapscale(lonc - opt.locdeg - 0.1,
                           latc - opt.locdeg + 0.1,
                           lonc,
                           latc,
                           length=50,
                           barstyle='fancy')
            plt.title('{} potential local matches (~{:3.1f} km depth)'.format(
                l, np.mean(ldeps)))
            plt.savefig('./{}/clusters/map{}.png'.format(opt.groupName, cnum),
                        dpi=100)
            plt.close()
            f.write('<img src="map{}.png"></br>'.format(cnum))
    else:
        f.write('No matches found</br>')
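The 'members' column used above is a space-separated list of row indices stored as a single string; the parse in isolation, with made-up data:

import numpy as np

members = np.fromstring('0 3 17 42', dtype=int, sep=' ')  # -> array([ 0,  3, 17, 42])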
Example #37
0
def plotTimelines(rtable, ftable, ttable, opt):
    """
    Creates the primary .html Bokeh timelines
    
    rtable: Repeater table
    ftable: Families table
    opt: Options object describing station/run parameters
    
    """

    dt = rtable.cols.startTimeMPL[:]
    fi = np.nanmean(rtable.cols.FI[:], axis=1)
    longevity = ftable.cols.longevity[:]
    famstarts = ftable.cols.startTime[:]
    alltrigs = ttable.cols.startTimeMPL[:]

    # Create histogram of events/dybin
    histT, hT = np.histogram(alltrigs,
                             bins=np.arange(min(alltrigs),
                                            max(alltrigs + opt.dybin),
                                            opt.dybin))
    histR, hR = np.histogram(dt,
                             bins=np.arange(min(alltrigs),
                                            max(alltrigs + opt.dybin),
                                            opt.dybin))

    # Determine padding for hover bars (~1% of window range on each side)
    barpad = (max(alltrigs) - min(alltrigs)) * 0.01
    barpadr = opt.recplot * 0.01

    # Create histogram of events/hrbin
    histTr, hTr = np.histogram(alltrigs,
                               bins=np.arange(
                                   max(alltrigs) - opt.recplot,
                                   max(alltrigs + opt.hrbin / 24),
                                   opt.hrbin / 24))
    histRr, hRr = np.histogram(dt,
                               bins=np.arange(
                                   max(alltrigs) - opt.recplot,
                                   max(alltrigs + opt.hrbin / 24),
                                   opt.hrbin / 24))

    oTOOLS = ['pan,box_zoom,reset,resize,save,tap']

    o0 = figure(tools=oTOOLS,
                plot_width=1250,
                plot_height=250,
                x_axis_type='datetime')
    if opt.dybin >= 1:
        o0.title = 'Repeaters vs. Orphans by {:.1f} Day Bin'.format(opt.dybin)
    else:
        o0.title = 'Repeaters vs. Orphans by {:.1f} Hour Bin'.format(
            opt.dybin * 24)
    o0.grid.grid_line_alpha = 0.3
    o0.xaxis.axis_label = 'Date'
    o0.yaxis.axis_label = 'Events'

    o0.line(matplotlib.dates.num2date(hT[0:-1] + opt.dybin / 2),
            histT - histR,
            color='black',
            legend='Orphans')
    o0.line(matplotlib.dates.num2date(hR[0:-1] + opt.dybin / 2),
            histR,
            color='red',
            legend='Repeaters',
            line_width=2)
    o0.legend.orientation = "top_left"

    o0r = figure(tools=oTOOLS,
                 plot_width=1250,
                 plot_height=250,
                 x_axis_type='datetime')
    if opt.hrbin < 24:
        o0r.title = 'Last {0} Days: Repeaters vs. Orphans by {1:.1f} Hour Bin'.format(
            opt.recplot, opt.hrbin)
    else:
        o0r.title = 'Last {0} Days: Repeaters vs. Orphans by {1:.1f} Day Bin'.format(
            opt.recplot, opt.hrbin / 24)
    o0r.grid.grid_line_alpha = 0.3
    o0r.xaxis.axis_label = 'Date'
    o0r.yaxis.axis_label = 'Events'

    o0r.line(matplotlib.dates.num2date(hTr[0:-1] + opt.hrbin / 48),
             histTr - histRr,
             color='black',
             legend='Orphans')
    o0r.line(matplotlib.dates.num2date(hRr[0:-1] + opt.hrbin / 48),
             histRr,
             color='red',
             legend='Repeaters',
             line_width=2)
    o0r.legend.orientation = "top_left"

    o1 = figure(tools=oTOOLS,
                plot_width=1250,
                plot_height=250,
                x_axis_type='datetime',
                x_range=o0.x_range)
    o1.title = 'Frequency Index'
    o1.grid.grid_line_alpha = 0.3
    o1.xaxis.axis_label = 'Date'
    o1.yaxis.axis_label = 'FI'
    o1.circle(matplotlib.dates.num2date(dt),
              fi,
              color='red',
              line_alpha=0,
              size=3,
              fill_alpha=0.5)

    o1r = figure(tools=oTOOLS,
                 plot_width=1250,
                 plot_height=250,
                 x_axis_type='datetime',
                 x_range=o0r.x_range)
    o1r.title = 'Frequency Index'
    o1r.grid.grid_line_alpha = 0.3
    o1r.xaxis.axis_label = 'Date'
    o1r.yaxis.axis_label = 'FI'
    # Put invisible points in for case that there are no events
    o1r.circle(matplotlib.dates.num2date(hTr[0:2]), [1, 1],
               line_alpha=0,
               fill_alpha=0)
    o1r.circle(matplotlib.dates.num2date(
        dt[dt > (max(alltrigs) - opt.recplot)]),
               fi[dt > (max(alltrigs) - opt.recplot)],
               color='red',
               line_alpha=0,
               size=3,
               fill_alpha=0.5)

    o2 = figure(tools=oTOOLS,
                plot_width=1250,
                plot_height=250,
                x_axis_type='datetime',
                x_range=o0.x_range,
                y_axis_type='log',
                y_range=[0.1,
                         np.sort(alltrigs)[-1] - np.sort(alltrigs)[0]])
    o2.title = 'Cluster Longevity'
    o2.grid.grid_line_alpha = 0.3
    o2.xaxis.axis_label = 'Date'
    o2.yaxis.axis_label = 'Days'
    for n in range(len(famstarts)):
        o2.line((matplotlib.dates.num2date(famstarts[n]),
                 matplotlib.dates.num2date(famstarts[n] + longevity[n])),
                (longevity[n], longevity[n]),
                color='red',
                line_alpha=0.5)

    o2r = figure(tools=oTOOLS,
                 plot_width=1250,
                 plot_height=250,
                 x_axis_type='datetime',
                 x_range=o0r.x_range,
                 y_axis_type='log',
                 y_range=[0.1,
                          np.sort(alltrigs)[-1] - np.sort(alltrigs)[0]])
    o2r.title = 'Cluster Longevity'
    o2r.grid.grid_line_alpha = 0.3
    o2r.xaxis.axis_label = 'Date'
    o2r.yaxis.axis_label = 'Days'
    # Put invisible points in for case that there are no events
    o2r.circle(matplotlib.dates.num2date(hTr[0:2]), [1, 1],
               line_alpha=0,
               fill_alpha=0)
    for n in range(len(famstarts)):
        if (max(alltrigs) - opt.recplot) <= famstarts[n]:
            o2r.line((matplotlib.dates.num2date(famstarts[n]),
                      matplotlib.dates.num2date(famstarts[n] + longevity[n])),
                     (longevity[n], longevity[n]),
                     color='red',
                     line_alpha=0.5)
        elif (max(alltrigs) - opt.recplot) <= famstarts[n] + longevity[n]:
            o2r.line((matplotlib.dates.num2date(hTr[0]),
                      matplotlib.dates.num2date(famstarts[n] + longevity[n])),
                     (longevity[n], longevity[n]),
                     color='red',
                     line_alpha=0.5)
            o2r.text(
                time.mktime(matplotlib.dates.num2date(hTr[0]).timetuple()) *
                1000 - 28799000,
                longevity[n],
                text=['<'],
                text_font_size='9pt',
                text_baseline='middle',
                text_color='red',
                text_alpha=0.5)

    # Build hover to show an image of the cluster core
    hover = HoverTool(tooltips="""
        <div>
        <div>
            <img src="./clusters/@famnum.png" style="height: 100px; width: 500px;
                vertical-align: middle;" />
            <span style="font-size: 9px; font-family: Helvetica;">Cluster ID: </span>
            <span style="font-size: 12px; font-family: Helvetica;">@famnum</span>
        </div>
        </div>
        """,
                      names=["patch"])

    TOOLS = [hover, 'pan,box_zoom,reset,resize,save,tap']

    # Build hover to show an image of the cluster core
    hoverr = HoverTool(tooltips="""
        <div>
        <div>
            <img src="./clusters/@famnum.png" style="height: 100px; width: 500px;
                vertical-align: middle;" />
            <span style="font-size: 9px; font-family: Helvetica;">Cluster ID: </span>
            <span style="font-size: 12px; font-family: Helvetica;">@famnum</span>
        </div>
        </div>
        """,
                       names=["patchr"])

    TOOLSrec = [hoverr, 'pan,box_zoom,reset,resize,save,tap']

    p1 = figure(tools=TOOLS,
                plot_width=1250,
                plot_height=500,
                x_axis_type='datetime',
                x_range=o0.x_range)
    p1.title = 'Occurrence Timeline (Color by Events per Hour)'
    p1.grid.grid_line_alpha = 0.3
    p1.xaxis.axis_label = 'Date'
    p1.yaxis.axis_label = 'Cluster by Date ({}+ Members)'.format(opt.minplot)

    r1 = figure(tools=TOOLSrec,
                plot_width=1250,
                plot_height=500,
                x_axis_type='datetime',
                x_range=o0r.x_range)
    r1.title = 'Occurrence Timeline (Color by Events per Hour)'
    r1.grid.grid_line_alpha = 0.3
    r1.xaxis.axis_label = 'Date'
    r1.yaxis.axis_label = 'Cluster by Date'

    # Steal YlOrRd (len=256) colormap from matplotlib
    colormap = matplotlib.cm.get_cmap('YlOrRd')
    bokehpalette = [
        matplotlib.colors.rgb2hex(m)
        for m in colormap(np.arange(colormap.N)[::-1])
    ]

    # Build the lists and dictionaries
    n = 0
    m = 0
    for clustNum in range(ftable.attrs.nClust):

        members = np.fromstring(ftable[clustNum]['members'],
                                dtype=int,
                                sep=' ')

        # Create histogram of events/hour
        hist, h = np.histogram(dt[members],
                               bins=np.arange(min(dt[members]),
                                              max(dt[members] + 1.0 / 24),
                                              1.0 / 24))
        d1 = matplotlib.dates.num2date(h[hist > 0])
        d2 = matplotlib.dates.num2date(h[hist > 0] + 1.0 / 24)
        histlog = np.log10(hist[hist > 0])
        ind = [int(min(255, 255 * (i / 2))) for i in histlog]
        colors = [bokehpalette[i] for i in ind]

        if len(dt[members]) >= opt.minplot:

            # Date is required as datenum
            p1.line((matplotlib.dates.num2date(min(
                dt[members])), matplotlib.dates.num2date(max(dt[members]))),
                    (n, n),
                    color='black')

            p1.quad(top=n + 0.3,
                    bottom=n - 0.3,
                    left=d1,
                    right=d2,
                    color=colors)

            # Text doesn't understand datetimes, need to convert to a number and subtract
            # about 8 hours
            p1.text(time.mktime(
                matplotlib.dates.num2date(max(dt[members])).timetuple()) * 1000
                    - 28799000,
                    n,
                    text=['   {}'.format(len(dt[members]))],
                    text_font_size='9pt',
                    text_baseline='middle')

            # Build source for hover patches
            fnum = clustNum
            if n == 0:
                xs = [[
                    matplotlib.dates.num2date(min(dt[members]) - barpad),
                    matplotlib.dates.num2date(min(dt[members]) - barpad),
                    matplotlib.dates.num2date(max(dt[members]) + barpad),
                    matplotlib.dates.num2date(max(dt[members]) + barpad)
                ]]
                ys = [[n - 0.5, n + 0.5, n + 0.5, n - 0.5]]
                famnum = [fnum]
            else:
                xs.append([
                    matplotlib.dates.num2date(min(dt[members]) - barpad),
                    matplotlib.dates.num2date(min(dt[members]) - barpad),
                    matplotlib.dates.num2date(max(dt[members]) + barpad),
                    matplotlib.dates.num2date(max(dt[members]) + barpad)
                ])
                ys.append([n - 0.5, n + 0.5, n + 0.5, n - 0.5])
                famnum.append([fnum])

            n = n + 1

        if max(dt[members]) > hRr[0]:

            if min(dt[members]) < hRr[0]:
                r1.line((matplotlib.dates.num2date(hRr[0] - opt.hrbin / 6),
                         matplotlib.dates.num2date(max(dt[members]))), (m, m),
                        color='black')
                r1.text(time.mktime(
                    matplotlib.dates.num2date(hRr[0] - opt.hrbin /
                                              6).timetuple()) * 1000 -
                        28799000,
                        m,
                        text=['<'],
                        text_font_size='9pt',
                        text_baseline='middle')

                idx = np.where(h[hist > 0] > hRr[0])[0]

            else:
                r1.line((matplotlib.dates.num2date(min(dt[members])),
                         matplotlib.dates.num2date(max(dt[members]))), (m, m),
                        color='black')
                idx = np.arange(len(d1))

            r1.quad(top=m + 0.3,
                    bottom=m - 0.3,
                    left=np.array(d1)[idx],
                    right=np.array(d2)[idx],
                    color=np.array(colors)[idx])

            # Text doesn't understand datetimes, need to convert to a number and subtract
            # about 8 hours
            r1.text(time.mktime(
                matplotlib.dates.num2date(max(dt[members])).timetuple()) * 1000
                    - 28799000,
                    m,
                    text=['   {}'.format(len(dt[members]))],
                    text_font_size='9pt',
                    text_baseline='middle')

            # Build source for hover patches
            fnumr = clustNum
            if m == 0:
                xsr = [[
                    matplotlib.dates.num2date(
                        max(min(dt[members]), hRr[0]) - barpadr),
                    matplotlib.dates.num2date(
                        max(min(dt[members]), hRr[0]) - barpadr),
                    matplotlib.dates.num2date(max(dt[members]) + barpadr),
                    matplotlib.dates.num2date(max(dt[members]) + barpadr)
                ]]
                ysr = [[m - 0.5, m + 0.5, m + 0.5, m - 0.5]]
                famnumr = [fnumr]
            else:
                xsr.append([
                    matplotlib.dates.num2date(
                        max(min(dt[members]), hRr[0]) - barpadr),
                    matplotlib.dates.num2date(
                        max(min(dt[members]), hRr[0]) - barpadr),
                    matplotlib.dates.num2date(max(dt[members]) + barpadr),
                    matplotlib.dates.num2date(max(dt[members]) + barpadr)
                ])
                ysr.append([m - 0.5, m + 0.5, m + 0.5, m - 0.5])
                famnumr.append([fnumr])
            m = m + 1

    if n > 0:
        # Patches allow hovering for image of core and cluster number
        source = ColumnDataSource(data=dict(xs=xs, ys=ys, famnum=famnum))
        p1.patches(xs=xs, ys=ys, source=source, name="patch", alpha=0)

        # Tapping on one of the patches will open a window to a file with more information
        # on the cluster in question.
        url = "./clusters/@famnum.html"
        renderer = p1.select(name="patch")[0]
        renderer.nonselection_glyph = renderer.glyph.clone()
        taptool = p1.select(dict(type=TapTool))[0]
        taptool.names.append("patch")
        taptool.callback = OpenURL(url=url)

        if n > 30:
            p1.set(plot_height=n * 15, y_range=Range1d(-1, n))

    else:
        p1.circle(matplotlib.dates.num2date(hTr[0:2]), [0, 0],
                  line_alpha=0,
                  fill_alpha=0)

    if m > 0:
        sourcer = ColumnDataSource(data=dict(xs=xsr, ys=ysr, famnum=famnumr))
        r1.patches(xs=xsr, ys=ysr, source=sourcer, name="patchr", alpha=0)

        url = "./clusters/@famnum.html"
        renderer = r1.select(name="patchr")[0]
        renderer.nonselection_glyph = renderer.glyph.clone()
        taptool = r1.select(dict(type=TapTool))[0]
        taptool.names.append("patchr")
        taptool.callback = OpenURL(url=url)

        if m > 30:
            r1.set(plot_height=m * 15, y_range=Range1d(-1, m))

    else:
        r1.circle(matplotlib.dates.num2date(hTr[0:2]), [0, 0],
                  line_alpha=0,
                  fill_alpha=0)

    o = gridplot([[o0], [o1], [p1], [o2]])
    o_recent = gridplot([[o0r], [o1r], [r1], [o2r]])

    output_file('{}/overview.html'.format(opt.groupName),
                title='{} Overview'.format(opt.title))
    save(o)

    output_file('{}/overview_recent.html'.format(opt.groupName),
                title='{0} Overview - Last {1:.1f} Days'.format(
                    opt.title, opt.recplot))
    save(o_recent)
Example #38
0
    GPIN = 22
    BPIN = 24

    p, stream = audio_setup(CHUNK, RATE, FORMAT)
    redPwr, greenPwr, bluePwr = light_setup(RPIN, GPIN, BPIN)

    redPwr.ChangeDutyCycle(50)
    toggle = True

    time = 2
    n = 50
    past_peaks = np.zeros([n])
    last_change = 0
    #for i in range(int(time*RATE/CHUNK)):
    for i in range(0, 2):
        data = np.fromstring(stream.read(CHUNK), dtype=np.int16)
        print(data.tolist())
        peak = np.average(np.abs(data)) * 2

        #if peak > np.mean(past_peaks) and abs(last_change - i) > 10:
        if peak > 7200 and abs(last_change - i) > 3:
            last_change = i
            print("Changing")
            if toggle:
                redPwr.ChangeDutyCycle(100)
                toggle = False
            else:
                redPwr.ChangeDutyCycle(0)
                toggle = True
        past_peaks[i % n] = peak
Example #39
0
 def yuv_2_rgb(self, raw_buffer):
     image = np.fromstring(raw_buffer, dtype=np.uint8)
     image.resize(self.image_dimensions[1], self.image_dimensions[0], 2)
     return cv2.cvtColor(image, cv2.COLOR_YUV2RGB_UYVY)
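UYVY packs two bytes per pixel, which is why the flat buffer above reshapes to (height, width, 2); a stand-alone check with hypothetical dimensions:

import numpy as np

h, w = 480, 640
raw = np.zeros(h * w * 2, dtype=np.uint8)  # stand-in for one UYVY frame buffer
raw.resize(h, w, 2)                        # in-place reshape, as in the method above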
Example #40
0
def readaxisblock(lines, start, size):
    nlines = ((size - 1) // 8) + 1
    try:
        return np.fromstring("".join(lines[start:start + nlines]), sep=" ")
    except ValueError:
        return np.fromstring("".join(lines[start:start + nlines - 1]), sep=" ")
Example #41
0
import cv2
import math as m
import numpy as np
import socket
import struct
from time import sleep
# Create a socket object
s = socket.socket()

# Define the port on which you want to connect
port = 5005
s.connect(('192.168.10.106', port))
while True:
    # connect to the server on local computer
    # receive data from the server
    len_str = s.recv(4)

    size = struct.unpack('!i', len_str)[0]
    #print(size)
    #print(' ')
    sleep(0.05)
    if size > 0:
        data = s.recv(size)
        nparr = np.fromstring(data, np.uint8)
        #print(len(data))
        if m.fabs(size) < 50000:
            img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
            cv2.imshow('received', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.waitKey(0)
# close the connection
cv2.destroyAllWindows()
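A matching sender is not shown in the snippet; it would presumably prefix each encoded frame with its big-endian length so that the s.recv(4) above knows how many bytes follow. A hypothetical sketch:

import struct

payload = b'...jpeg bytes...'  # stand-in for cv2.imencode output
packet = struct.pack('!i', len(payload)) + payload
# s.sendall(packet)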
Example #42
0
import struct

import numpy as np

from matplotlib import pyplot as plt
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
from keras import Sequential
from keras.layers.core import Dense
from keras.optimizers import SGD

#%%
## Read mnist data
train_filename = '/Users/abijithjkamath/Desktop/TECHNOLOGIE/RawData/mnist/train-images-idx3-ubyte'
with open(train_filename, 'rb') as f:
    zero, data_type, dims = struct.unpack('>HBB', f.read(4))
    shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dims))
    mnist_train = np.fromstring(f.read(), dtype=np.uint8).reshape(shape)

test_filename = '/Users/abijithjkamath/Desktop/TECHNOLOGIE/RawData/mnist/t10k-images-idx3-ubyte'
with open(test_filename, 'rb') as f:
    zero, data_type, dims = struct.unpack('>HBB', f.read(4))
    shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dims))
    mnist_test = np.fromstring(f.read(), dtype=np.uint8).reshape(shape)

labels_filename = '/Users/abijithjkamath/Desktop/TECHNOLOGIE/RawData/mnist/train-labels-idx1-ubyte'
with open(labels_filename, 'rb') as f:
    zero, data_type, dims = struct.unpack('>HBB', f.read(4))
    shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dims))
    mnist_train_labels = np.fromstring(f.read(), dtype=np.uint8).reshape(shape)

test_labels_filename = '/Users/abijithjkamath/Desktop/TECHNOLOGIE/RawData/mnist/t10k-labels-idx1-ubyte'
with open(test_labels_filename, 'rb') as f:
    zero, data_type, dims = struct.unpack('>HBB', f.read(4))
    shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dims))
    mnist_test_labels = np.fromstring(f.read(), dtype=np.uint8).reshape(shape)
Example #43
0
def base64_to_cv2(b64str: str):
    data = base64.b64decode(b64str.encode('utf8'))
    data = np.fromstring(data, np.uint8)
    data = cv2.imdecode(data, cv2.IMREAD_COLOR)
    return data
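A hypothetical round trip through base64_to_cv2, encoding a blank image first:

import base64
import cv2
import numpy as np

img = np.zeros((4, 4, 3), dtype=np.uint8)
ok, buf = cv2.imencode('.png', img)
restored = base64_to_cv2(base64.b64encode(buf.tobytes()).decode('utf8'))
assert restored.shape == img.shape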
Example #44
0
        sys.exit()

## Get temperature-density grid.  Temperature will be in MeV, rhoYe in g/cm^3.
if weaklib.choose('temperature-density grid',
                  ['FFN', 'input temperatures and densities']) == 'FFN':
    temp = weaklib.FFNtemp * 10**9 * weaklib.kB
    rhoYe = weaklib.FFNrhoYe
else:
    ## If to be input, get temperature.
    temp_unit = weaklib.choose('unit of temperature', ['MeV', 'T9'])
    default = '0.01'
    temp = input(
        '\nTemperatures ({}, temp1,temp2,..., default {}): '.format(
            temp_unit, default)
    )
    temp = numpy.fromstring(temp, sep=',')
    if len(temp) == 0:
        print('Using default.')
        temp = numpy.fromstring(default, sep=',')
    if temp_unit == 'T9':
        temp = temp * 10**9 * weaklib.kB
    ## If to be input, get rhoYe.
    default = '1'
    rhoYe = input(
        '\nDensity log(rho*Ye) (g/cm^3, log(rhoYe1),log(rhoYe2),... default {}): '
        .format(default)
    )
    rhoYe = numpy.fromstring(rhoYe, sep=',')
    if len(rhoYe) == 0:
        print('Using default.')
        rhoYe = numpy.fromstring(default, sep=',')
Example #45
0
 def bitstring_to_nparray(bitstring: str):
     if bitstring:
         return np.fromstring(bitstring, dtype='u1') - 48
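The '- 48' works because '0' and '1' are ASCII 48 and 49, so the byte values map straight to bits; the same trick with the non-deprecated np.frombuffer:

import numpy as np

bits = np.frombuffer(b'10110', dtype='u1') - 48  # -> array([1, 0, 1, 1, 0], dtype=uint8)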
Example #46
0
def recordPitchFromInput(duration):
    import pyaudio
    import sys
    import numpy as np
    import aubio
    
    pitches = []
    
    # initialize pyaudio
    p = pyaudio.PyAudio()
    
    # open stream
    buffer_size = 1024
    pyaudio_format = pyaudio.paFloat32
    n_channels = 1
    samplerate = 11000 #44100
    stream = p.open(format=pyaudio_format,
                    channels=n_channels,
                    rate=samplerate,
                    input=True,
                    frames_per_buffer=buffer_size)
    ##### START MY CODE
    outputsink = None
    total_frames = 0
    record_duration = duration - 0.85 # number determined through trial/error
    ##### END MY CODE
    
    # setup pitch
    tolerance = 0.8
    win_s = 4096 # fft size
    hop_s = buffer_size # hop size
    pitch_o = aubio.pitch("default", win_s, hop_s, samplerate)
    pitch_o.set_unit("midi")
    pitch_o.set_tolerance(tolerance)
    
    print("*** starting recording")
    while True:
        try:
            audiobuffer = stream.read(buffer_size)
            signal = np.fromstring(audiobuffer, dtype=np.float32)
    
            pitch = pitch_o(signal)[0]
            confidence = pitch_o.get_confidence()
            
            pitches.append(pitch)
    
            print("{} / {}".format(pitch,confidence))
    
            if outputsink:
                outputsink(signal, len(signal))
    
            if record_duration:
                total_frames += len(signal)
                if record_duration * samplerate < total_frames:
                    break
                    
        except KeyboardInterrupt:
            print("*** Ctrl+C pressed, exiting")
            break
    
    print("*** done recording")
    stream.stop_stream()
    stream.close()
    p.terminate()
    return getAverage(pitches)
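Each paFloat32 sample is 4 bytes, so one buffer of 1024 frames decodes to exactly 1024 values; a stand-alone check with a fabricated buffer:

import numpy as np

buffer_size = 1024
audiobuffer = b'\x00' * (4 * buffer_size)  # stand-in for stream.read(buffer_size)
signal = np.frombuffer(audiobuffer, dtype=np.float32)
assert signal.size == buffer_size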
Example #47
0
 def get_img_from_screen_shot(self):
     screen_shot = self.take_png_screenshot()
     nparr = np.fromstring(screen_shot, np.uint8)
     img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
     return img
Example #48
0
 
= 750.

CONCLUSION
The self loop feedback gating mechanism of recurrent networks has been derived
from first principles via a postulate of invariance to time warpings.
'''

model = keras.models.load_model('../data/models/keras/model5.h5')

doc_data = utils.simple_preprocess(title=title, abstract=abstract, text=text)

data, ngrams = utils.create_features(doc_data, labels=False)

docvec = [(np.fromstring(instance['title_vec'].strip('[]'), sep=',') +
           np.fromstring(instance['abstract_vec'].strip('[]'), sep=',') +
           np.fromstring(instance['text_vec'].strip('[]'), sep=',')) / 3
          for instance in doc_data]
docvec = np.array(docvec)

#data = fulldata[:,:300] - docvec
wv = data[:, :300] - docvec  # 11 features
tfidf = data[:, 307:308]
data = np.hstack((wv, tfidf))

predictions = model.predict(data)

df_ = np.array([
    ngrams.reshape((-1, )),
    predictions.reshape((-1, )),
Example #49
0
    def _load_with_vocab(self, embed_filepath, vocab, dtype=np.float32, padding='<pad>', unknown='<unk>',
                         error='ignore', init_method=None):
        r"""
        Extract the embeddings of the words in ``vocab`` from the pretrained vectors stored at embed_filepath.
        EmbedLoader automatically detects whether embed_filepath is in word2vec format (the first line holds only
        two elements) or glove format.

        :param str embed_filepath: path to the pretrained embedding.
        :param vocab: vocabulary of type :class:`~fastNLP.Vocabulary`; embeddings are read for the words it contains.
            Words missing from the file are sampled from a normal distribution fitted to the found embeddings,
            so the whole Embedding stays identically distributed.
        :param dtype: dtype of the loaded embedding
        :param str padding: the padding token of the vocabulary
        :param str unknown: the unknown token of the vocabulary
        :param str error: ``ignore`` or ``strict``; with ``ignore``, errors are skipped silently, with ``strict``
            they are raised. Typical failures are blank lines in the file or inconsistent vector dimensions.
        :param init_method: how to initialize vectors that were not found; any method from torch.nn.init.* may be
            used. Defaults to torch.nn.init.zeros_
        :return torch.tensor: shape [len(vocab), dimension], where dimension is taken from the pretrained embedding.
        """
        assert isinstance(vocab, Vocabulary), "Only fastNLP.Vocabulary is supported."
        if not os.path.exists(embed_filepath):
            raise FileNotFoundError("`{}` does not exist.".format(embed_filepath))
        with open(embed_filepath, 'r', encoding='utf-8') as f:
            line = f.readline().strip()
            parts = line.split()
            start_idx = 0
            if len(parts) == 2:
                dim = int(parts[1])
                start_idx += 1
            else:
                dim = len(parts) - 1
                f.seek(0)
            matrix = {}  # key: word index in vocab; value: its vector, or None if the word is absent from the pretrained file
            if vocab.padding:
                matrix[vocab.padding_idx] = torch.zeros(dim)
            if vocab.unknown:
                matrix[vocab.unknown_idx] = torch.zeros(dim)
            found_count = 0
            found_unknown = False
            for idx, line in enumerate(f, start_idx):
                try:
                    parts = line.strip().split()
                    word = ''.join(parts[:-dim])
                    nums = parts[-dim:]
                    # align unk and pad with the vocabulary's own tokens
                    if word == padding and vocab.padding is not None:
                        word = vocab.padding
                    elif word == unknown and vocab.unknown is not None:
                        word = vocab.unknown
                        found_unknown = True
                    if word in vocab:
                        index = vocab.to_index(word)
                        matrix[index] = torch.from_numpy(np.fromstring(' '.join(nums), sep=' ', dtype=dtype, count=dim))
                        if self.only_norm_found_vector:
                            matrix[index] = matrix[index] / np.linalg.norm(matrix[index])
                        found_count += 1
                except Exception as e:
                    if error == 'ignore':
                        warnings.warn("Error occurred at the {} line.".format(idx))
                    else:
                        logger.error("Error occurred at the {} line.".format(idx))
                        raise e
            logger.info("Found {} out of {} words in the pre-training embedding.".format(found_count, len(vocab)))
            if not self.only_use_pretrain_word:  # if only pretrained words are kept, skip creating entries for missing words
                for word, index in vocab:
                    if index not in matrix and not vocab._is_word_no_create_entry(word):
                        if found_unknown:  # if an unknown vector was found, initialize with it
                            matrix[index] = matrix[vocab.unknown_idx]
                        else:
                            matrix[index] = None
            # the words present in matrix are the ones that need an embedding row
            vectors = self._randomly_init_embed(len(matrix), dim, init_method)
            
            if vocab.unknown is None:  # create a dedicated unknown vector
                unknown_idx = len(matrix)
                vectors = torch.cat((vectors, torch.zeros(1, dim)), dim=0).contiguous()
            else:
                unknown_idx = vocab.unknown_idx
            self.register_buffer('words_to_words', torch.full((len(vocab), ), fill_value=unknown_idx).long())
            index = 0
            for word, index_in_vocab in vocab:
                if index_in_vocab in matrix:
                    vec = matrix.get(index_in_vocab)
                    if vec is not None:  # use the found vector; None means it still needs training
                        vectors[index] = vec
                    self.words_to_words[index_in_vocab] = index
                    index += 1

            return vectors
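The per-line parsing above reduces to splitting off the trailing dim numbers, which also copes with words containing spaces; the same logic on a made-up glove-style line:

import numpy as np

line = 'the 0.1 -0.2 0.3'
dim = 3
parts = line.strip().split()
word = ''.join(parts[:-dim])                                     # 'the'
vec = np.fromstring(' '.join(parts[-dim:]), sep=' ', count=dim)  # array([ 0.1, -0.2,  0.3])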
Example #50
0
recorder.setformat(alsaaudio.PCM_FORMAT_FLOAT_LE)  # PCM_FORMAT_GSM
recorder.setchannels(1)

# create aubio pitch detection (first argument is method, "default" is
# "yinfft", can also be "yin", "mcomb", fcomb", "schmitt").
pitcher = aubio.pitch("default", win_s, hop_s, samplerate)
# set output unit (can be 'midi', 'cent', 'Hz', ...)
pitcher.set_unit("Hz")
# ignore frames under this level (dB)
pitcher.set_silence(-40)

print("Starting to listen, press Ctrl+C to stop")

# main loop

while True:
    try:
        # read data from audio input
        _, data = recorder.read()
        # convert data to aubio float samples
        samples = np.fromstring(data, dtype=aubio.float_type)
        # pitch of current frame
        freq = pitcher(samples)[0]
        # compute energy of current block
        energy = np.sum(samples**2) / len(samples)
        # do something with the results
        print("{:10.4f} {:10.4f}".format(freq, energy))
    except KeyboardInterrupt:
        print("Ctrl+C pressed, exiting")
        break
Example #51
0
def dftd3_harvest(jobrec, modulerec):
    """Process raw results from read-only `dftd3rec` into Datum
    fields in returned `jobrec`: jobrec@i, dftd3rec@io -> jobrec@io.

    Parameters
    ----------
    jobrec : dict
        Nested dictionary with input specifications for DFTD3 in generic
        QC terms.
    modulerec : dict
        Nested dictionary with input specification and output collection
        from DFTD3 in program-specific commands, files, & output capture.

    Returns
    -------
    jobrec : dict
        Nested dictionary with input specification and output collection
        from DFTD3 in generic QC terms.

    Notes
    -----
    Central to harvesting is the fact (to the planting, not to the DFTD3
    program) that 2-body and 3-body are run separately. Because of how
    damping functions work (see GH:psi4/psi4#1407), some 2-body damping
    schemes can give wrong answers for 3-body. And because 3-body is
    set to run with some dummy values, the 2-body values are no good.

    """
    # amalgamate output
    text = modulerec['stdout']
    text += '\n  <<<  DFTD3 Results  >>>\n'

    for fl, contents in modulerec['outfiles'].items():
        if contents is not None:
            text += f'\n  DFTD3 scratch file {fl} has been read.\n'
            text += contents

    # parse energy output (could go further and break into E6, E8, E10 and Cn coeff)
    real = np.array(jobrec['molecule']['real'])
    full_nat = real.shape[0]
    real_nat = np.sum(real)

    for ln in modulerec['stdout'].splitlines():
        if re.search('DFTD3 V', ln):
            version = ln.replace('DFTD3', '').replace('|', '').strip().lower()
        elif re.match(' Edisp /kcal,au', ln):
            ene = Decimal(ln.split()[3])
        elif re.match(r" E6\(ABC\) \"   :", ln):  # c. v3.2.0
            raise ValueError("Cannot process ATM results from DFTD3 prior to v3.2.1.")
        elif re.match(r""" E6\(ABC\) /kcal,au:""", ln):
            atm = Decimal(ln.split()[-1])
        elif re.match(' normal termination of dftd3', ln):
            break
    else:
        if not ((real_nat == 1) and (jobrec['driver'] == 'gradient')):
            raise ValueError('Unsuccessful run. Possibly -D variant not available in dftd3 version.')

    # parse gradient output
    # * DFTD3 crashes on one-atom gradients. Avoid the error (above) and just force the correct result (below).
    if modulerec['outfiles']['dftd3_gradient'] is not None:
        srealgrad = modulerec['outfiles']['dftd3_gradient'].replace('D', 'E')
        realgrad = np.fromstring(srealgrad, count=3 * real_nat, sep=' ').reshape((-1, 3))
    elif real_nat == 1:
        realgrad = np.zeros((1, 3))

    if modulerec['outfiles']['dftd3_abc_gradient'] is not None:
        srealgrad = modulerec['outfiles']['dftd3_abc_gradient'].replace('D', 'E')
        realgradabc = np.fromstring(srealgrad, count=3 * real_nat, sep=' ').reshape((-1, 3))
    elif real_nat == 1:
        realgradabc = np.zeros((1, 3))

    if jobrec['driver'] == 'gradient':
        ireal = np.argwhere(real).reshape((-1))
        fullgrad = np.zeros((full_nat, 3))
        rg = realgradabc if (jobrec['extras']['info']['dashlevel'] == 'atmgr') else realgrad
        try:
            fullgrad[ireal, :] = rg
        except NameError as exc:
            raise Dftd3Error('Unsuccessful gradient collection.') from exc

    qcvkey = jobrec['extras']['info']['fctldash'].upper()

    # OLD WAY
    calcinfo = []
    if jobrec['extras']['info']['dashlevel'] == 'atmgr':
        calcinfo.append(qcel.Datum('DISPERSION CORRECTION ENERGY', 'Eh', atm))
        calcinfo.append(qcel.Datum('3-BODY DISPERSION CORRECTION ENERGY', 'Eh', atm))
        calcinfo.append(qcel.Datum('AXILROD-TELLER-MUTO 3-BODY DISPERSION CORRECTION ENERGY', 'Eh', atm))

        if jobrec['driver'] == 'gradient':
            calcinfo.append(qcel.Datum('DISPERSION CORRECTION GRADIENT', 'Eh/a0', fullgrad))
            calcinfo.append(qcel.Datum('3-BODY DISPERSION CORRECTION GRADIENT', 'Eh/a0', fullgrad))
            calcinfo.append(qcel.Datum('AXILROD-TELLER-MUTO 3-BODY DISPERSION CORRECTION GRADIENT', 'Eh/a0', fullgrad))

    else:
        calcinfo.append(qcel.Datum('DISPERSION CORRECTION ENERGY', 'Eh', ene))
        calcinfo.append(qcel.Datum('2-BODY DISPERSION CORRECTION ENERGY', 'Eh', ene))
        if qcvkey:
            calcinfo.append(qcel.Datum(f'{qcvkey} DISPERSION CORRECTION ENERGY', 'Eh', ene))

        if jobrec['driver'] == 'gradient':
            calcinfo.append(qcel.Datum('DISPERSION CORRECTION GRADIENT', 'Eh/a0', fullgrad))
            calcinfo.append(qcel.Datum('2-BODY DISPERSION CORRECTION GRADIENT', 'Eh/a0', fullgrad))
            if qcvkey:
                calcinfo.append(qcel.Datum(f'{qcvkey} DISPERSION CORRECTION GRADIENT', 'Eh/a0', fullgrad))

    calcinfo1 = {info.label: info for info in calcinfo}
    text += qcel.datum.print_variables(calcinfo1)
    calcinfo = {info.label: info.data for info in calcinfo}
    calcinfo = qcel.util.unnp(calcinfo, flat=True)

    # NEW WAY
    #module_vars = {}
    #module_vars['DISPERSION CORRECTION ENERGY'] =  ene
    #module_vars['{} DISPERSION CORRECTION ENERGY'.format(qcvkey)] =  ene
    #if jobrec['driver'] == 'gradient':
    #    module_vars['DISPERSION CORRECTION GRADIENT'] = fullgrad
    #    module_vars['{} DISPERSION CORRECTION GRADIENT'.format(qcvkey)] = fullgrad
    #
    #module_vars = PreservingDict(module_vars)
    #qcvars.build_out(module_vars)
    #calcinfo = qcvars.certify(module_vars)
    #text += print_variables(calcinfo)

    jobrec['stdout'] = text
    jobrec['extras']['qcvars'] = calcinfo

    prov = {}
    prov['creator'] = 'dftd3'
    prov['routine'] = sys._getframe().f_code.co_name
    prov['version'] = version
    jobrec['provenance'] = prov

    return jobrec
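The gradient files above are plain whitespace-separated text once the Fortran 'D' exponents are mapped to 'E'; the parse in isolation, with a fabricated two-atom block:

import numpy as np

srealgrad = ' 1.0D-03 0.0D+00 0.0D+00\n-1.0D-03 0.0D+00 0.0D+00\n'.replace('D', 'E')
realgrad = np.fromstring(srealgrad, count=6, sep=' ').reshape((-1, 3))  # shape (2, 3)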
Example #52
0
def load_seq_crop_data_masktumor_try(Parameter_List):
    img = Parameter_List[0]
    tumor = Parameter_List[1]
    lines = Parameter_List[2]
    numid = Parameter_List[3]
    minindex = Parameter_List[4]
    maxindex = Parameter_List[5]
    #  randomly scale
    scale = np.random.uniform(0.8,1.2)
    deps = int(args.input_size * scale)
    rows = int(args.input_size * scale)
    cols = 3

    sed = np.random.randint(1,numid)
    cen = lines[sed-1]
    cen = np.fromstring(cen, dtype=int, sep=' ')

    a = min(max(minindex[0] + deps/2, cen[0]), maxindex[0]- deps/2-1)
    b = min(max(minindex[1] + rows/2, cen[1]), maxindex[1]- rows/2-1)
    c = min(max(minindex[2] + cols/2, cen[2]), maxindex[2]- cols/2-1)
   # pdb.set_trace()
    cropp_img = img[int(a - deps / 2):int(a + deps / 2), int(b - rows / 2):int(b + rows / 2),
                int(c - cols / 2):int(c + cols / 2 + 1)].copy()
    cropp_tumor = tumor[int(a - deps / 2):int(a + deps / 2),int( b - rows / 2):int(b + rows / 2),
                 int( c - cols / 2):int(c + cols / 2 + 1)].copy()
#    cropp_img = img[a - deps / 2:a + deps / 2, b - rows / 2:b + rows / 2,
#                c - cols / 2: c + cols / 2 + 1].copy()
#    cropp_tumor = tumor[a - deps / 2:a + deps / 2, b - rows / 2:b + rows / 2,
#                  c - cols / 2:c + cols / 2 + 1].copy()

    cropp_img -= MEAN
    # randomly flipping
    flip_num = np.random.randint(0, 8)
    if flip_num == 1:
        cropp_img = np.flipud(cropp_img)
        cropp_tumor = np.flipud(cropp_tumor)
    elif flip_num == 2:
        cropp_img = np.fliplr(cropp_img)
        cropp_tumor = np.fliplr(cropp_tumor)
    elif flip_num == 3:
        cropp_img = np.rot90(cropp_img, k=1, axes=(1, 0))
        cropp_tumor = np.rot90(cropp_tumor, k=1, axes=(1, 0))
    elif flip_num == 4:
        cropp_img = np.rot90(cropp_img, k=3, axes=(1, 0))
        cropp_tumor = np.rot90(cropp_tumor, k=3, axes=(1, 0))
    elif flip_num == 5:
        cropp_img = np.fliplr(cropp_img)
        cropp_tumor = np.fliplr(cropp_tumor)
        cropp_img = np.rot90(cropp_img, k=1, axes=(1, 0))
        cropp_tumor = np.rot90(cropp_tumor, k=1, axes=(1, 0))
    elif flip_num == 6:
        cropp_img = np.fliplr(cropp_img)
        cropp_tumor = np.fliplr(cropp_tumor)
        cropp_img = np.rot90(cropp_img, k=3, axes=(1, 0))
        cropp_tumor = np.rot90(cropp_tumor, k=3, axes=(1, 0))
    elif flip_num == 7:
        cropp_img = np.flipud(cropp_img)
        cropp_tumor = np.flipud(cropp_tumor)
        cropp_img = np.fliplr(cropp_img)
        cropp_tumor = np.fliplr(cropp_tumor)

    cropp_tumor = resize(cropp_tumor, (args.input_size,args.input_size,args.input_cols), order=0, mode='edge', cval=0, clip=True, preserve_range=True)
    cropp_img   = resize(cropp_img, (args.input_size,args.input_size,args.input_cols), order=3, mode='constant', cval=0, clip=True, preserve_range=True)
    return cropp_img, cropp_tumor[:,:,1]
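The three min/max lines above just clamp the random crop center so the window stays inside [minindex, maxindex]; the pattern extracted, with hypothetical names:

def clamp_center(cen, lo, hi, half):
    # keep a window of width 2*half centered on cen inside [lo, hi)
    return min(max(lo + half, cen), hi - half - 1)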
Example #53
0
    def read(self, nframes=10, lframes=1024, sframes=1):
        """Process the tiq input file.
        Following information are extracted, except Data offset, all other are stored in the dic. Data needs to be normalized over 50 ohm.

        AcquisitionBandwidth
        Frequency
        File name
        Data I and Q [Unit is Volt]
        Data Offset
        DateTime
        NumberSamples
        Resolution Bandwidth
        RFAttenuation (it is already considered in the data scaling, no need to use this value, only for info)
        Sampling Frequency
        Span
        Voltage Scaling
        """

        self.lframes = lframes
        self.nframes = nframes
        self.sframes = sframes

        filesize = os.path.getsize(self.filename)
        log.info("File size is {} bytes.".format(filesize))

        with open(self.filename) as f:
            line = f.readline()
        self.data_offset = int(line.split("\"")[1])

        with open(self.filename, 'rb') as f:
            ba = f.read(self.data_offset)

        xml_tree_root = et.fromstring(ba)

        for elem in xml_tree_root.iter(
                tag='{http://www.tektronix.com}AcquisitionBandwidth'):
            self.acq_bw = float(elem.text)
        for elem in xml_tree_root.iter(
                tag='{http://www.tektronix.com}Frequency'):
            self.center = float(elem.text)
        for elem in xml_tree_root.iter(
                tag='{http://www.tektronix.com}DateTime'):
            self.date_time = str(elem.text)
        for elem in xml_tree_root.iter(
                tag='{http://www.tektronix.com}NumberSamples'):
            self.nsamples_total = int(
                elem.text
            )  # this entry matches (filesize - self.data_offset) / 8) well
        for elem in xml_tree_root.iter('NumericParameter'):
            if 'name' in elem.attrib and elem.attrib[
                    'name'] == 'Resolution Bandwidth' and elem.attrib[
                        'pid'] == 'rbw':
                self.rbw = float(elem.find('Value').text)
        for elem in xml_tree_root.iter(
                tag='{http://www.tektronix.com}RFAttenuation'):
            self.rf_att = float(elem.text)
        for elem in xml_tree_root.iter(
                tag='{http://www.tektronix.com}SamplingFrequency'):
            self.fs = float(elem.text)
        for elem in xml_tree_root.iter('NumericParameter'):
            if 'name' in elem.attrib and elem.attrib[
                    'name'] == 'Span' and elem.attrib['pid'] == 'globalrange':
                self.span = float(elem.find('Value').text)
        for elem in xml_tree_root.iter(
                tag='{http://www.tektronix.com}Scaling'):
            self.scale = float(elem.text)

        log.info(
            "Center {0} Hz, span {1} Hz, sampling frequency {2} scale factor {3}."
            .format(self.center, self.span, self.fs, self.scale))
        log.info("Header size {} bytes.".format(self.data_offset))

        log.info(
            "Proceeding to read binary section, 32bit (4 byte) little endian.")
        log.info('Total number of samples: {}'.format(self.nsamples_total))
        log.info("Frame length: {0} data points = {1}s".format(
            lframes, lframes / self.fs))
        self.nframes_tot = int(self.nsamples_total / lframes)
        log.info("Total number of frames: {0} = {1}s".format(
            self.nframes_tot, self.nsamples_total / self.fs))
        log.info("Start reading at offset: {0} = {1}s".format(
            sframes, sframes * lframes / self.fs))
        log.info("Reading {0} frames = {1}s.".format(
            nframes, nframes * lframes / self.fs))

        self.header = ba

        total_n_bytes = 8 * nframes * lframes  # 8 comes from 2 times 4 byte integer for I and Q
        start_n_bytes = 8 * (sframes - 1) * lframes

        try:
            with open(self.filename, 'rb') as f:
                f.seek(self.data_offset + start_n_bytes)
                ba = f.read(total_n_bytes)
        except:
            log.error('File seems to end here!')
            return

        # interpret the bytes as little endian 4 byte ints (interleaved I and Q)
        self.data_array = np.fromstring(ba, dtype='<i4')
        # Scale to retrieve value in Volts. Augmented assignment does not work here!
        self.data_array = self.data_array * self.scale
        self.data_array = self.data_array.view(
            dtype='c16'
        )  # reinterpret the bytes as a 16 byte complex number, which consists of 2 doubles.

        log.info("Output complex array has a size of {}.".format(
            self.data_array.size))
Example #54
0
    #             image = dataset.images[i].tostring()
    #             example = tf.train.Example(features=tf.train.Features(feature={
    #                 'height':tf.train.Feature(int64_list=tf.train.Int64List(value=
    #                                                                         [dataset.images.shape[1]])),
    #                 'width': tf.train.Feature(int64_list=tf.train.Int64List(value=[
    #                     dataset.images.shape[2]])),
    #                 'depth': tf.train.Feature(int64_list=tf.train.Int64List(value=[
    #                     dataset.images.shape[3]])),
    #                 'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[
    #                     int(dataset.labels[i])])),
    #                 'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[
    #                     image]))}))
    #             writer.write(example.SerializeToString())
    #         writer.close()

    filename = os.path.join(save_dir, 'train.tfrecords')
    iterator = tf.python_io.tf_record_iterator(filename)
    serialized_example = next(iterator)

    example = tf.train.Example()
    example.ParseFromString(serialized_example)
    height = example.features.feature['height'].int64_list.value
    width = example.features.feature['width'].int64_list.value
    label = example.features.feature['label'].int64_list.value
    image_raw = example.features.feature['image_raw'].bytes_list.value
    print('type of image_raw:', type(image_raw))
    print('height:', height)
    img_flat = np.fromstring(image_raw[0], np.uint8)
    image = np.reshape(img_flat, newshape=(height[0], width[0], -1))
    print('shape of image:', image.shape)
Example #55
0
 def binparse(self, read):
     result = np.fromstring(read(self._memsize),
                            dtype=self._bigendian_format)
     return result[0], self.is_null(result[0])
Example #56
0

if __name__ == "__main__":
    # vgg_model = tf.keras.applications.VGG16(weights='imagenet')
    # vgg_extractor = tf.keras.models.Model(inputs=vgg_model.input, outputs=vgg_model.get_layer("fc2").output)
    inception_resnet_v2_extractor = inception_resnet_v2.InceptionResNetV2(
        weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    img_dir_path = input('[INPUT] image dir path : ')
    features = {'img': [], 'inception_resnet_v2': [], 'cluster': []}
    pics_num = os.listdir(img_dir_path)
    bar = progressbar.ProgressBar(maxval=len(pics_num), \
    widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    bar.start()
    for i, img_path in enumerate(pics_num):
        img_path = img_dir_path + img_path
        with open(img_path, 'rb') as f:
            img_bytes = f.read()
        Image = cv2.imdecode(np.fromstring(img_bytes, np.uint8),
                             cv2.IMREAD_UNCHANGED)
        Image = Image[:, :, :3]
        single_feature_table = feature_table_creator(Image)
        features['img'].append(img_path)
        # features['vgg'].append(single_feature_table['vgg'])
        features['inception_resnet_v2'].append(
            single_feature_table['inception_resnet_v2'])
        bar.update(i + 1)
        sleep(0.1)
    pkl_file_name = img_dir_path.split('/')[-2] + '.pkl'
    with open(pkl_file_name, 'wb') as f:
        pkl.dump(features, f)
Example #57
0
def parseVector(partitionIndex, iterator, num_partitions):
    for i, line in enumerate(iterator):
        arr = np.fromstring(line, dtype=np.float64)
        yield (i * num_partitions + partitionIndex, arr)
Example #58
0
def readmnistlabels(fn):
    with open(fn, 'rb') as fp:
        magic = bigend4(fp.read(4))
        assert magic == 2049
        n = bigend4(fp.read(4))
        return np.fromstring(fp.read(), dtype=np.uint8)
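bigend4 is not defined in the snippet; it presumably unpacks one big-endian 32-bit integer from the idx header, something like:

import struct

def bigend4(b):
    # big-endian unsigned 32-bit int, as used in the MNIST idx header
    return struct.unpack('>I', b)[0]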
Example #59
0
import os
import sys
import time
import wave

import numpy as np
from matplotlib import pyplot as plt
import Image
import ImageGrab

path = "C://Users\Thomas\Desktop\網安project_5"
name = '2.wav'
filename = os.path.join(path, name)

f = wave.open(filename, 'rb')

params = f.getparams()
nchannels, sampwidth, framerate, nframes = params[:4]

strData = f.readframes(nframes)
waveData = np.fromstring(strData, dtype=np.short)

waveData = waveData * 1.0 / max(abs(waveData))

waveData = np.reshape(waveData, [nframes, nchannels]).T
f.close()

time = np.arange(0, nframes) * (1.0 / framerate)
time = np.reshape(time, [nframes, 1]).T
plt.plot(time[0, :nframes], waveData[0, :nframes], c="b")
plt.xlabel("time")
plt.ylabel("amplitude")
plt.title("Original wave")
#plt.show()
# spectrogram
framelength = 10  # sampling width along the time axis
framesize = framelength * framerate
Example #60
0
 def binparse(self, read):
     result = np.fromstring(read(self._memsize),
                            dtype=self._bigendian_format)[0]
     result_mask = self._base.is_null(result)
     return result, result_mask