def encode_gray(number):
    s1 = numpy.binary_repr(number)
    s2 = "0" + numpy.binary_repr(number)[:-1]
    return "".join(["1" if s1[i] != s2[i] else "0" for i in range(len(s1))])
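# Hedged aside (not part of the original snippet): the string comparison above is
# equivalent to the usual integer Gray-code formula n ^ (n >> 1). A minimal sketch
# for comparison, assuming plain NumPy is available.
import numpy as np

def encode_gray_int(number):
    # XOR the value with itself shifted right by one bit, then format it
    return np.binary_repr(number ^ (number >> 1))

# encode_gray(5) and encode_gray_int(5) both give '111'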
def write_adc(self, addr, data):
    SCLK = 0x200
    CS = self.chip_select
    IDLE = SCLK
    SDA_SHIFT = 8
    self.snap.write_int('adc16_controller', IDLE, offset=0, blindwrite=True)
    for i in range(8):
        addr_bit = (addr >> (8 - i - 1)) & 1
        state = (addr_bit << SDA_SHIFT) | CS
        self.snap.write_int('adc16_controller', state, offset=0, blindwrite=True)
        logging.debug("Printing address state written to adc16_controller, offset=0, clock low")
        logging.debug(np.binary_repr(state, width=32))
        state = (addr_bit << SDA_SHIFT) | CS | SCLK
        self.snap.write_int('adc16_controller', state, offset=0, blindwrite=True)
        logging.debug("Printing address state written to adc16_controller, offset=0, clock high")
        logging.debug(np.binary_repr(state, width=32))
    for j in range(16):
        data_bit = (data >> (16 - j - 1)) & 1
        state = (data_bit << SDA_SHIFT) | CS
        self.snap.write_int('adc16_controller', state, offset=0, blindwrite=True)
        logging.debug("Printing data state written to adc16_controller, offset=0, clock low")
        logging.debug(np.binary_repr(state, width=32))
        state = (data_bit << SDA_SHIFT) | CS | SCLK
        self.snap.write_int('adc16_controller', state, offset=0, blindwrite=True)
        logging.debug("Printing data state written to adc16_controller, offset=0, clock high")
        logging.debug(np.binary_repr(state, width=32))
    self.snap.write_int('adc16_controller', IDLE, offset=0, blindwrite=True)
def write_dp_item(coe, dp, palette, width, input_, pp):
    assert 1 <= width <= 31
    coe.write(dp, low(pp))
    # pack the 3-bit palette and the 5-bit two's-complement of -width into one byte
    coe.write(dp + 1, int(np.binary_repr(palette, 3) + np.binary_repr(-width, 5), 2))
    coe.write(dp + 2, high(pp))
    coe.write(dp + 3, input_)
def embed(cover,secret,pos,skip): file=open("in.txt","w") multiple=False coverMatrix=pgm_to_mat(cover) secretMatrix=pgm_to_mat(secret) stegoMatrix=np.zeros(np.shape(coverMatrix), dtype=np.complex_) np.copyto(stegoMatrix,coverMatrix) dummy="" if(skip<1): skip=1 multiple=True for a in range(0,len(secretMatrix)): for b in range(0,len(secretMatrix)): dummy+=np.binary_repr(secretMatrix[a][b],width=8) #file.write(np.binary_repr(secretMatrix[a][b],width=8)+"\n") index=0 for a in range(0,len(stegoMatrix)*len(stegoMatrix),skip): rown=int(a % len(stegoMatrix)) coln=int(a / len(stegoMatrix)) if(index>=len(dummy)): break stegoMatrix[coln][rown] = ( int(coverMatrix[coln][rown]) & ~(1 << hash(coln,rown,pos) )) | (int(dummy[index],2) << hash(coln,rown,pos)) index += 1 if(multiple): stegoMatrix[coln][rown] = (int(stegoMatrix[coln][rown]) & ~(1 << (3-hash(coln, rown, pos)))) | ( int(dummy[index], 2) << (3-hash(coln, rown, pos))) index += 1 file.write(np.binary_repr(int(stegoMatrix[coln][rown]), 8) + "\n") return stegoMatrix
def convolve(image,psf,doPSF=True,edgeCheck=False): """ A reasonably fast convolution routine that supports re-entry with a pre-FFT'd PSF. Returns the convolved image and the FFT'd PSF. """ datadim1 = image.shape[0] datadim2 = image.shape[1] if datadim1!=datadim2: ddim = max(datadim1,datadim2) s = numpy.binary_repr(ddim-1) s = s[:-1]+'0' # Guarantee that padding is used else: ddim = datadim1 s = numpy.binary_repr(ddim-1) if s.find('0')>0: size = 2**len(s) if edgeCheck==True and size-ddim<8: size*=2 boxd = numpy.zeros((size,size)) r = size-datadim1 r1 = r2 = r/2 if r%2==1: r1 = r/2+1 c = size-datadim2 c1 = c2 = c/2 if c%2==1: c1 = c/2+1 boxdslice = (slice(r1,datadim1+r1),slice(c1,datadim2+c1)) boxd[boxdslice] = image else: boxd = image if doPSF: # Pad the PSF to the image size boxp = boxd*0. if boxd.shape[0]==psf.shape[0]: boxp = psf.copy() else: r = boxp.shape[0]-psf.shape[0] r1 = r/2+1 c = boxp.shape[1]-psf.shape[1] c1 = c/2+1 boxpslice = (slice(r1,psf.shape[0]+r1),slice(c1,psf.shape[1]+c1)) boxp[boxpslice] = psf.copy() # Store the transform of the image after the first iteration a = (numpy.fft.rfft2(boxp)) else: a = psf # PSF transform and multiplication b = a*numpy.fft.rfft2(boxd) # Inverse transform, including phase-shift to put image back in center; # this removes the requirement to do 2x zero-padding so makes things # go a bit quicker. b = numpy.fft.fftshift(numpy.fft.irfft2(b)).real # If the image was padded, remove the padding if s.find('0')>0: b = b[boxdslice] return b,a
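# Hedged aside on the padding trick used in convolve() above (and in prep() further
# down): for ddim >= 2, 2 ** len(numpy.binary_repr(ddim - 1)) is the smallest power
# of two that is >= ddim, and s.find('0') < 0 exactly when ddim is already a power
# of two. A minimal check, assuming nothing beyond NumPy:
import numpy as np

def next_pow2(ddim):
    # length of the binary string of (ddim - 1) equals (ddim - 1).bit_length()
    return 2 ** len(np.binary_repr(ddim - 1))

assert next_pow2(100) == 128 and next_pow2(128) == 128 and next_pow2(129) == 256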
def displaystates(psi, N=5, pop=False):
    '''print the population of each state'''
    for i in range(len(psi)):
        if np.around(abs(psi[i]), 5) > 0:
            if pop:
                print(np.binary_repr(i).zfill(N), ": ", np.around(psi[i], 3))
            else:
                print(np.binary_repr(i).zfill(N), ": ", np.around(abs(psi[i]**2), 3), np.around(psi[i], 5))
def loadFIRcoeffs(self): N_freqs = len(map(float, unicode(self.textedit_DACfreqs.toPlainText()).split())) taps = 26 for ch in range(N_freqs): # If the resonator's attenuation is >=99 then its FIR should be zeroed if self.zeroChannels[ch]: lpf = numpy.array([0.]*taps)*(2**11-1) print 'deleted ch ',ch else: lpf = numpy.array(self.fir)*(2**11-1) print ch #lpf = numpy.array([1.]+[0]*(taps-1))*(2**11-1) # 26 tap, 25 us matched fir #lpf = numpy.array([0.0875788844768 , 0.0840583257978 , 0.0810527406206 , 0.0779008825067 , 0.075106964962 , 0.0721712998256 , 0.0689723729398 , 0.066450095496 , 0.0638302570705 , 0.0613005685486 , 0.0589247737004 , 0.0565981917436 , 0.0544878914297 , 0.0524710948658 , 0.0503447054014 , 0.0483170854189 , 0.0463121066637 , 0.044504238059 , 0.0428469827102 , 0.0410615366471 , 0.0395570640218 , 0.0380071830756 , 0.0364836787854 , 0.034960959124 , 0.033456372241 , 0.0321854467182])*(2**11-1) #26 tap, 20 us matched fir #lpf = numpy.array([ 0.102806030245 , 0.097570344415 , 0.0928789946181 , 0.0885800360545 , 0.0841898850361 , 0.079995145104 , 0.0761649967857 , 0.0724892663141 , 0.0689470889358 , 0.0657584886557 , 0.0627766233242 , 0.0595952531565 , 0.0566356208278 , 0.053835736579 , 0.0510331408751 , 0.048623806127 , 0.0461240096904 , 0.0438134132285 , 0.0418265743203 , 0.0397546477453 , 0.0377809254888 , 0.0358044897245 , 0.0338686929847 , 0.0321034547839 , 0.0306255734188 , 0.0291036235859 ])*(2**11-1) #26 tap, 30 us matched fir #lpf = numpy.array([ 0.0781747107378 , 0.0757060398243 , 0.0732917718492 , 0.0708317694778 , 0.0686092845217 , 0.0665286923521 , 0.0643467681477 , 0.0621985982971 , 0.0600681642401 , 0.058054873199 , 0.0562486467178 , 0.0542955553149 , 0.0527148880657 , 0.05096365681 , 0.0491121116212 , 0.0474936094733 , 0.0458638771941 , 0.0443219286645 , 0.0429290438102 , 0.0415003391096 , 0.0401174498302 , 0.0386957715665 , 0.0374064708747 , 0.0362454802408 , 0.0350170176804 , 0.033873302383 ])*(2**11-1) #lpf = lpf[::-1] # 26 tap, lpf, 250 kHz, #lpf = numpy.array([-0 , 0.000166959420533 , 0.00173811663844 , 0.00420937801998 , 0.00333739357391 , -0.0056305703275 , -0.0212738104942 , -0.0318529375832 , -0.0193635986879 , 0.0285916612022 , 0.106763943766 , 0.18981814328 , 0.243495321192 , 0.243495321192 , 0.18981814328 , 0.106763943766 , 0.0285916612022 , -0.0193635986879 , -0.0318529375832 , -0.0212738104942 , -0.0056305703275 , 0.00333739357391 , 0.00420937801998 , 0.00173811663844 , 0.000166959420533 , -0])*(2**11-1) # 26 tap, lpf, 125 kHz. 
#lpf = numpy.array([0 , -0.000431898216436 , -0.00157886921107 , -0.00255492263971 , -0.00171727439076 , 0.00289724121972 , 0.0129123447233 , 0.0289345497995 , 0.0500906370566 , 0.0739622085341 , 0.0969821586979 , 0.115211955161 , 0.125291869266 , 0.125291869266 , 0.115211955161 , 0.0969821586979 , 0.0739622085341 , 0.0500906370566 , 0.0289345497995 , 0.0129123447233 , 0.00289724121972 , -0.00171727439076 , -0.00255492263971 , -0.00157886921107 , -0.000431898216436 , -0])*(2**11-1) # Generic 40 tap matched filter for 25 us lifetime pulse #lpf = numpy.array([0.153725595011 , 0.141052390733 , 0.129753816201 , 0.119528429291 , 0.110045314901 , 0.101336838027 , 0.0933265803805 , 0.0862038188673 , 0.0794067694409 , 0.0729543134914 , 0.0674101836798 , 0.0618283869464 , 0.0567253144676 , 0.0519730940444 , 0.047978953698 , 0.043791412767 , 0.0404560656757 , 0.0372466775252 , 0.0345000956808 , 0.0319243455811 , 0.0293425115323 , 0.0268372778298 , 0.0245216835234 , 0.0226817116475 , 0.0208024488535 , 0.0189575043357 , 0.0174290665862 , 0.0158791788119 , 0.0144611054123 , 0.0132599563305 , 0.0121083419203 , 0.0109003580368 , 0.0100328742978 , 0.00939328253743 , 0.00842247241585 , 0.00789304712484 , 0.00725494259117 , 0.00664528407122 , 0.00606688645845 , 0.00552041438208])*(2**11-1) #lpf = lpf[::-1] for n in range(taps/2): coeff0 = int(lpf[2*n]) coeff1 = int(lpf[2*n+1]) coeff0 = numpy.binary_repr(int(lpf[2*n]), 12) coeff1 = numpy.binary_repr(int(lpf[2*n+1]), 12) coeffs = int(coeff1+coeff0, 2) coeffs_bin = struct.pack('>l', coeffs) register_name = 'FIR_b' + str(2*n) + 'b' + str(2*n+1) self.roach.write(register_name, coeffs_bin) self.roach.write_int('FIR_load_coeff', (ch<<1) + (1<<0)) self.roach.write_int('FIR_load_coeff', (ch<<1) + (0<<0)) # Inactive channels will also be zeroed. lpf = numpy.array([0.]*taps) for ch in range(N_freqs, 256): for n in range(taps/2): #coeffs = struct.pack('>h', lpf[2*n]) + struct.pack('>h', lpf[2*n+1]) coeffs = struct.pack('>h', lpf[2*n+1]) + struct.pack('>h', lpf[2*n]) register_name = 'FIR_b' + str(2*n) + 'b' + str(2*n+1) self.roach.write(register_name, coeffs) self.roach.write_int('FIR_load_coeff', (ch<<1) + (1<<0)) self.roach.write_int('FIR_load_coeff', (ch<<1) + (0<<0)) print 'done loading fir.' self.status_text.setText('FIRs loaded')
def read_binaries(tks1, tks2, encoding):
    dtks1 = decode(tks1, encoding)
    if not dtks1 or not validate_length(dtks1):
        error("The first samples have different sizes")
        return
    dtks2 = decode(tks2, encoding)
    if not dtks2 or not validate_length(dtks2):
        error("The second samples have different sizes")
        return
    btks1 = ["".join([np.binary_repr(ord(c), width=8) for c in tk]) for tk in dtks1]
    btks2 = ["".join([np.binary_repr(ord(c), width=8) for c in tk]) for tk in dtks2]
    return btks1, btks2, "01"
def send_32(self, inbits):
    print("inbits", inbits)
    programming_bits = np.bitwise_and(inbits, 65535)          # bits 0 to 15
    address_branch = np.bitwise_and(inbits, 8323072) >> 16    # bits 16 to 22
    print("send stuff")
    print("address_branch", np.binary_repr(address_branch))
    print("programming_bits", np.binary_repr(programming_bits))
    print("address_branch", address_branch)
    print("programming_bits", programming_bits << 7)
    final_address = (programming_bits << 7) + address_branch + 2**31
    print("final address", final_address)
    biasusb_wrap.send_32(int(final_address))
def get_key_slow(self, iarr, level=None):
    if level is None:
        level = self.level
    i1, i2, i3 = iarr
    rep1 = np.binary_repr(i1, width=self.level)
    rep2 = np.binary_repr(i2, width=self.level)
    rep3 = np.binary_repr(i3, width=self.level)
    inter = np.zeros(self.level * 3, dtype='c')
    inter[self.dim_slices[0]] = rep1
    inter[self.dim_slices[1]] = rep2
    inter[self.dim_slices[2]] = rep3
    return int(inter.tostring(), 2)
def ExecuteQP (self, qubits, funcao, memory, customMatrices = []): # Method for build the structures (Pages, VPPs and sizesList) of the Quantum Processes. if qubits > 5: sizeVPP = 4 qtdPages = qubits/sizeVPP rest = qubits%sizeVPP if rest > 1: qtdPages += 1 else: qtdPages = 1 sizeVPP = qubits rest = 0 Pages = [] opIndex = 0 sizesList = [] for pageId in range (qtdPages): if rest == 1: qtdFunctions = sizeVPP + 1 rest = 0 elif rest > 1 and pageId == qtdPages - 1: qtdFunctions = rest else: qtdFunctions = sizeVPP Lvpp = [] for VPPIndex in range (2**qtdFunctions): ## Creates each VPP of a Lvpp listOp = self.StringToList(funcao, qtdFunctions, opIndex) param1 = numpy.binary_repr(VPPIndex, qtdFunctions) ## First parameter of each function to fill the QPPs zero = numpy.complex(0) pos = 0 list = [] for tupleIndex in range (2**qtdFunctions): ## Creates each tuple of a VPP param2 = numpy.binary_repr(tupleIndex,qtdFunctions) temp = numpy.complex(1) op = 0 while temp != zero and op < qtdFunctions: temp = temp * self.getValue(listOp[op], int(param1[op:op+1:]), int(param2[op:op+1:])) op += 1 if temp != zero: list.append([temp,pos]) pos += 1 Lvpp.append(list) Pages.append(Lvpp) opIndex += qtdFunctions sizesList.append(2**(qubits-opIndex)) self.ApplyValuesForQP(Pages,sizesList,memory,numpy.complex(1),0,0,numpy.binary_repr(0,qubits), qubits)
def _send_32(self, in_bits, debug=False):
    programming_bits = np.bitwise_and(in_bits, 65535)         # bits 0 to 15
    address_branch = np.bitwise_and(in_bits, 8323072) >> 16   # bits 16 to 22
    final_address = (programming_bits << 7) + address_branch + 2**31
    if debug:
        print("in_bits", in_bits)
        print("send stuff")
        print("address_branch", np.binary_repr(address_branch))
        print("programming_bits", np.binary_repr(programming_bits))
        print("address_branch", address_branch)
        print("programming_bits", programming_bits << 7)
        print("final address", final_address)
    self._client.send(str([0, final_address]))
    time.sleep(0.001)
def prep(image,psf): datadim1 = image.shape[0] datadim2 = image.shape[1] if datadim1!=datadim2: ddim = max(datadim1,datadim2) s = numpy.binary_repr(ddim-1) s = s[:-1]+'0' # Guarantee that padding is used else: ddim = datadim1 s = numpy.binary_repr(ddim-1) if s.find('0')>0: size = 2**len(s) boxd = numpy.zeros((size,size)) r = size-datadim1 r1 = r2 = r/2 if r%2==1: r1 = r/2+1 c = size-datadim2 c1 = c2 = c/2 if c%2==1: c1 = c/2+1 boxdslice = (slice(r1,datadim1+r1),slice(c1,datadim2+c1)) boxd[boxdslice] = image else: boxd = image boxp = boxd*0. if boxd.shape[0]==psf.shape[0]: boxp = psf.copy() else: r = boxp.shape[0]-psf.shape[0] r1 = r/2+1 c = boxp.shape[1]-psf.shape[1] c1 = c/2+1 boxpslice = (slice(r1,psf.shape[0]+r1),slice(c1,psf.shape[1]+c1)) boxp[boxpslice] = psf.copy() from pyfft.cuda import Plan import pycuda.driver as cuda from pycuda.tools import make_default_context import pycuda.gpuarray as gpuarray cuda.init() context = make_default_context() stream = cuda.Stream() plan = Plan(boxp.shape,stream=stream) gdata = gpuarray.to_gpu(boxp.astype(numpy.complex64)) plan.execute(gdata) return gdata,boxd.shape,boxdslice,plan,stream
def get_instruction(self): """ A generator to get the instruction binary code for respective assembly code """ variable_address = 16 for line in self._lines: if '@' in line: # for A instruction instruction = '0' # if @21 then its direct accessing if line[1:].isdigit(): instruction += numpy.binary_repr(int(line[1:]), 15) # Predefined variables elif line[1:] in Parser.SYMBOLS: instruction += numpy.binary_repr(int(Parser.SYMBOLS[line[1:]]), 15) # user defined variables else: Parser.SYMBOLS[line[1:]] = variable_address instruction += numpy.binary_repr(variable_address, 15) variable_address += 1 yield instruction else: # for C instruction all the null cases are equal to '0' # hence initialized it with zero dest, rest, comp, jump = '0', '0', '0', '0' # to separate destination and rest of the code dest_rest = (['0', '0'] + list(line.split('='))) dest_rest.reverse() rest, dest = dest_rest[:2] # C Instruction fixed starting values instruction = '111' # from the rest to get computation and jump comp_jump = (['0', '0'] + list(rest.split(';'))) comp_jump.reverse() # if there is no JMP instruction and else if len(list(rest.split(';'))) == 1: comp, jump = comp_jump[0:2] else: jump, comp = comp_jump[0:2] instruction += Parser.INSTRUCTIONS[comp] +\ Parser.DESTINATION[dest] +\ Parser.JUMP[jump] yield instruction
def col_names(self): mode = self.parameters_dict.StateReadout.readout_mode names = np.array(range(self.output_size())[::-1])+1 if mode == 'pmt': if self.output_size==1: dependents = [('', 'prob dark ', '')] else: dependents = [('', 'num dark {}'.format(x), '') for x in names ] if mode == 'pmt_states': if self.output_size==1: dependents = [('', 'prob dark ', '')] else: dependents = [('', ' {} dark ions'.format(x-1), '') for x in names ] if mode == 'pmt_parity': if self.output_size==1: dependents = [('', 'prob dark ', '')] else: dependents = [('', ' {} dark ions'.format(x-1), '') for x in names[1:] ] dependents.append(('', 'Parity', '')) if mode == 'camera': dependents = [('', ' prob ion {}'.format(x), '') for x in range(self.output_size())] if mode == 'camera_states': num_of_ions=int(self.parameters_dict.IonsOnCamera.ion_number) names = range(2**num_of_ions) dependents=[] for name in names: temp= np.binary_repr(name,width=num_of_ions) temp = self.binary_to_state(temp) temp=('', 'Col {}'.format(temp), '') dependents.append(temp) if mode == 'camera_parity': num_of_ions=int(self.parameters_dict.IonsOnCamera.ion_number) names = range(2**num_of_ions) dependents=[] for name in names: temp= np.binary_repr(name,width=num_of_ions) temp = self.binary_to_state(temp) temp=('', 'Col {}'.format(temp), '') dependents.append(temp) dependents.append(('', 'Parity', '')) return dependents
def intToArray(i, length=0):
    """Convert an unsigned integer to a binary array.

    Args:
        i: unsigned integer
        length: padding to length (default: 0)

    Returns:
        binary array
    """
    if length > 0:
        s = np.binary_repr(i, width=length)
    else:
        s = np.binary_repr(i)
    # np.fromstring is deprecated for this use; frombuffer on the encoded string is equivalent
    m = np.frombuffer(s.encode(), dtype=np.uint8) - ord('0')
    m = np.flipud(m)
    return m
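# Hedged usage sketch (not part of the original): intToArray returns the bits
# least-significant first, so index k is bit k of the input.
assert list(intToArray(4, 4)) == [0, 0, 1, 0]  # 4 == 0b0100, flipped to LSB-first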
def regionBit(self,name,init_region,bit_num,initial=False): """ Return the value of bit #bit_num in the bit-vector encoding of the currently selected region name (string): Unique identifier for region sensor (default="target") init_region (region): Name of the sensor whose state is interested bit_num (int): The index of the bit to return """ if initial: if not self.sensorListenInitialized: self._createSubwindow() if name not in self.sensorValue.keys(): # create a new map element # choose an initial (decomposed) region inside the desired one self.sensorValue[name] = self.proj.regionMapping[init_region][0] self.p_sensorHandler.stdin.write("loadproj," + self.proj.getFilenamePrefix() + ".spec,\n") self.p_sensorHandler.stdin.write(",".join(["region", name, self.sensorValue[name]]) + "\n") return True else: if name in self.sensorValue: reg_idx = self.proj.rfi.indexOfRegionWithName(self.sensorValue[name]) numBits = int(math.ceil(math.log(len(self.proj.rfi.regions),2))) reg_idx_bin = numpy.binary_repr(reg_idx, width=numBits) #print name, bit_num, (reg_idx_bin[bit_num] == '1') return (reg_idx_bin[bit_num] == '1') else: print "(SENS) WARNING: Region sensor %s is unknown!" % button_name return None
def usm(seqs_list): d = {} unique = list(set(''.join(seqs_list))) unique.sort() number_of_bits=int(math.ceil(np.log2(len(unique)))) for number, char in enumerate(unique): d[char] = number mat=[] for i,seq in enumerate(seqs_list): list_b = [np.binary_repr(numb, width=number_of_bits) for numb in [d[char] for char in seqs_list[i]]] matrix_usmc = np.zeros([len(seq)+2,number_of_bits*2]) #Forward Coordinates matrix_usmc[0,:number_of_bits] = matrix_usmc[len(seq)+1,number_of_bits:]= np.random.rand(number_of_bits) for j in range(1,len(seq)+1): binary = np.array([int(x) for x in str(list_b[j-1])]) matrix_usmc[j,:number_of_bits] = matrix_usmc[j-1,:number_of_bits]+(0.5*(1-matrix_usmc[j-1,:number_of_bits]))*binary -(0.5*matrix_usmc[j-1,:number_of_bits])*(1-binary) #Backward Coordinates for j in reversed(range(1,len(seq)+1)): binary = np.array([int(x) for x in str(list_b[j-1])]) matrix_usmc[j,number_of_bits:] = matrix_usmc[j+1,number_of_bits:]+(0.5*(1-matrix_usmc[j+1,number_of_bits:]))*binary - (0.5*matrix_usmc[j+1,number_of_bits:])*(1-binary) matrix_usmc = matrix_usmc[1:-1,:] mat.append(matrix_usmc) mat= np.array(mat) matrix = np.zeros([len(seqs_list), len(seqs_list)]) for i, j in itertools.combinations(range(0,len(seqs_list)),2): matrix[i][j] = matrix[j][i] = calc_usm_d(mat[i][:,:number_of_bits],mat[i][:,number_of_bits:],mat[j][:,:number_of_bits],mat[j][:,number_of_bits:]) return matrix
def getSecret(stegoMatrix,pos,skip,x=None): file = open("out.txt", "w") index=0 if(x==None): secret=int(math.sqrt(len(stegoMatrix)*len(stegoMatrix)/(8*skip))) else: secret=x dummy="" multiple=False if(skip<1): skip=1 multiple=True secretMatrix=np.zeros((secret,secret),dtype=np.complex_) for a in range(0,len(stegoMatrix)): for b in range(0,len(stegoMatrix)): c=np.binary_repr(int(stegoMatrix[a][b]),8) if(index%skip==0): dummy+=c[7-hash(a,b,pos)] if(multiple): dummy += c[4+hash(a, b, pos)] file.write(c+"\t"+"\n") index+=1 sindex=0 for a in range(0,min(len(dummy),secret*secret*8),8): secretMatrix[int(sindex/secret)][int(sindex%secret)]=int(dummy[a:a+8],2) sindex+=1 return secretMatrix
def findCalPattern(fpga, nBits=12, busName='bus2', bPlot=False, nSnaps=1):
    loadAllBitsCmd = 2**(nBits+1) - 1  # all bits are 1
    failPatterns = []
    for delay in np.arange(0, 32):
        # set all IODELAYs to the current delay
        fpga.write_int('dly_val', delay)
        for iBit in range(0, 56):
            fpga.write_int('load_dly', iBit)
            time.sleep(.01)
        time.sleep(.1)
        # take a few snapshots of the ramp signal with this delay
        # and take note of which bits show glitches
        snapFailPatterns = []
        for iSnap in range(nSnaps):
            snapDict = snapZdok(fpga)
            glitchDict = checkCurrentGlitches(fpga, snapDict[busName], nBits=nBits, bPlot=bPlot)
            failPattern = glitchDict['failPattern']
            snapFailPatterns.append(failPattern)
        failPattern = np.bitwise_or.reduce(snapFailPatterns)
        failPatternStr = np.binary_repr(failPattern, width=nBits)
        print('{0:02d}'.format(delay), ':', failPatternStr)
        # plt.show()
        failPatterns.append(failPattern)
def all_bit_strings(bits, dtype='uint8'):
    """
    Create a matrix of all binary strings of a given width as the rows.

    Parameters
    ----------
    bits : int
        The number of bits to count through.
    dtype : str or dtype object
        The dtype of the returned array.

    Returns
    -------
    bit_strings : ndarray, shape (2 ** bits, bits)
        The numbers from 0 to 2 ** bits - 1 as binary numbers, most
        significant bit first.

    Notes
    -----
    Obviously the memory requirements of this are exponential in the
    first argument, so use with caution.
    """
    return np.array([[int(c) for c in np.binary_repr(i, width=bits)]
                     for i in range(2 ** bits)], dtype=dtype)
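# Hedged usage sketch (not part of the original): for bits=2 the rows are the
# four 2-bit strings, most significant bit first.
print(all_bit_strings(2))  # [[0 0] [0 1] [1 0] [1 1]], shape (4, 2)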
def activate_network(self, num_activations=1): """Activates the Markov Network Parameters ---------- num_activations: int (default: 1) The number of times the Markov Network should be activated Returns ------- None """ original_input_values = np.copy(self.states[:self.num_input_states]) for _ in range(num_activations): for markov_gate, mg_input_ids, mg_output_ids in zip(self.markov_gates, self.markov_gate_input_ids, self.markov_gate_output_ids): # Determine the input values for this Markov Gate mg_input_values = self.states[mg_input_ids] mg_input_index = int(''.join([str(int(val)) for val in mg_input_values]), base=2) # Determine the corresponding output values for this Markov Gate roll = np.random.uniform() mg_output_index = np.where(markov_gate[mg_input_index, :] >= roll)[0][0] mg_output_values = np.array(list(np.binary_repr(mg_output_index, width=len(mg_output_ids))), dtype=np.uint8) self.states[mg_output_ids] = np.bitwise_or(self.states[mg_output_ids], mg_output_values) self.states[:self.num_input_states] = original_input_values
def count_sensitive_neighborhood_hash(g): """ Compute the count sensitive neighborhood hashed version of a graph. """ gnh = g.copy() g = array_labels_to_str(g) #iterate over every node in the graph for node in iter(g.nodes()): neighbors_labels = [g.node[n]["label"] for n in g.neighbors_iter(node)] #if node has no neighboors, nh is its own label if len(neighbors_labels) > 0: #count number of unique labels c = Counter(neighbors_labels) count_weighted_neighbors_labels = [] for label, c in c.iteritems(): label = str_to_array(label) c_bin = np.array( list(np.binary_repr( c, len(label) ) ), dtype=np.int64 ) label = np.bitwise_xor( label, c_bin) label = np.roll( label, c ) count_weighted_neighbors_labels.append( label ) x = count_weighted_neighbors_labels[0] for l in count_weighted_neighbors_labels[1:]: x = np.bitwise_xor( x, l) node_label = str_to_array(g.node[node]["label"]) csnh = np.bitwise_xor( np.roll( node_label, 1 ), x ) else: csnh = str_to_array(g.node[node]["label"]) gnh.node[node]["label"] = csnh return gnh
def demand(district_id, date, slot, bin_digits, db):
    rows = db.exe("select demand from gaps where district_id=%s and date='%s' and slot=%s"
                  % (district_id, date, slot))
    if rows:
        s = np.binary_repr(int(rows[0]["demand"]), width=bin_digits)
        return [int(x) for x in s], rows[0]["demand"]
    else:
        return [0 for i in range(bin_digits)], 0
def int_to_16_bit_array(inputInt):
    bit_string = np.binary_repr(inputInt)
    bit_array = [int(char) for char in bit_string]
    if len(bit_array) < 16:
        for i in range(16 - len(bit_array)):
            bit_array.insert(0, 0)
    return bit_array
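# Hedged aside (not part of the original): for non-negative inputs below 2**16
# the manual zero-padding above matches binary_repr's built-in width argument.
import numpy as np
assert int_to_16_bit_array(42) == [int(c) for c in np.binary_repr(42, width=16)]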
def int2bin(self, num):
    action = numpy.zeros(6)
    actStr = numpy.binary_repr(num)
    for i in range(len(actStr)):
        action[i] = float(actStr[len(actStr) - 1 - i])
    return action
def expandGenerators(proj): (phases, xs, zs) = proj newPhases, newXs, newZs = [], [], [] k = len(phases) if (k == 0): return ([], [], []) n = len(xs[0]) def ph(xs1, zs1, xs2, zs2): out = 0 for i in range(n): tup = (xs1[i], zs1[i], xs2[i], zs2[i]) if tup == (0, 1, 1, 0): out += 2 # Z*X if tup == (0, 1, 1, 1): out += 2 # Z*XZ if tup == (1, 1, 1, 0): out += 2 # XZ*X if tup == (1, 1, 1, 1): out += 2 # XZ*XZ return out for i in range(1, 2**k): # omit identity bitstring = list(np.binary_repr(i, width=k)) prod = (0, np.zeros(n), np.zeros(n)) for j in range(k): if bitstring[j] == '1': phplus = ph(prod[1], prod[2], xs[j], zs[j]) prod = (prod[0] + phases[j] + phplus, prod[1] + xs[j], prod[2] + zs[j]) newPhases.append(prod[0] % 4) newXs.append(prod[1] % 2) newZs.append(prod[2] % 2) return (newPhases, newXs, newZs)
def _getAllowedShapes(self, shape):
    ''' Return set of allowed shapes that can be squeezed into given shape.

        Examples
        --------
        >>> PB = ParamBag()  # fixing K,D doesn't matter
        >>> PB._getAllowedShapes(())
        set([()])
        >>> PB._getAllowedShapes((1))
        set([(), (1,)])
        >>> PB._getAllowedShapes((23))
        set([(23)])
        >>> PB._getAllowedShapes((3,1))
        set([(3), (3,1)])
        >>> PB._getAllowedShapes((1,1))
        set([(), (1,), (1,1)])
    '''
    allowedShapes = set()
    if len(shape) == 0:
        allowedShapes.add(tuple())
        return allowedShapes
    shapeVec = np.asarray(shape, dtype=np.int32)
    onesMask = shapeVec == 1
    keepMask = np.logical_not(onesMask)
    nOnes = sum(onesMask)
    for b in range(2**nOnes):
        bStr = np.binary_repr(b)
        bStr = '0' * (nOnes - len(bStr)) + bStr
        keepMask[onesMask] = np.asarray([int(x) > 0 for x in bStr])
        curShape = shapeVec[keepMask]
        allowedShapes.add(tuple(curShape))
    return allowedShapes
def decimals_to_binary(decimals, n_bits):
    """Convert a sequence of decimal numbers to a sequence of binary numbers

    Parameters
    ----------
    decimals : array-like
        Array of integers to convert. Must all be >= 0.
    n_bits : array-like
        Array of the number of bits to use to represent each decimal number.

    Returns
    -------
    binary : list
        Binary representation.

    Notes
    -----
    This function is useful for generating IDs to be stamped using the TDT.
    """
    decimals = np.array(decimals, int)
    if decimals.ndim != 1 or (decimals < 0).any():
        raise ValueError('decimals must be 1D with all nonnegative values')
    n_bits = np.array(n_bits, int)
    if decimals.shape != n_bits.shape:
        raise ValueError('n_bits must have same shape as decimals')
    if (n_bits <= 0).any():
        raise ValueError('all n_bits must be positive')
    binary = list()
    for d, b in zip(decimals, n_bits):
        if d > 2 ** b - 1:
            raise ValueError('cannot convert number {0} using {1} bits'
                             ''.format(d, b))
        binary.extend([int(bb) for bb in np.binary_repr(d, b)])
    assert len(binary) == n_bits.sum()  # make sure we didn't do something dumb
    return binary
def split_number(number, size: int) -> str: """ Split a number into an 8-bit array for easy storage in memory. Parameters ---------- number : int Number to convert. size : [1, 2, 4, 8] Size of the list in bytes Returns ------- list(np.int8) The byte list representation of the number. Raises ------ ValueError If the number given is greater than the maximum value allowed by the array size. InvalidSize The size given is not in the list of allowed sizes. """ try: if abs(number) > max_value[size]: raise ValueError('{0} is too big for size {1}'.format(number, size)) except KeyError: raise InvalidSize(size) binary = np.binary_repr(number, width=(size * 8))[:size * 8] binary = [binary[curr * 8:(curr + 1) * 8] for curr in range(size)] return [eval('0b{0}'.format(num)) for num in binary]
#!/usr/bin/env python
import numpy as np

print('13, 17 bin format:')
a, b = 13, 17
print(bin(a), bin(b))

print('13, 17 bit_and:')
print(np.bitwise_and(13, 17))

print('13, 17 bit_or:')
print(np.bitwise_or(13, 17))

print('\n13 bit invert:')
print(np.invert(np.array([13], dtype=np.uint8)))

print('13 bin:')
print(np.binary_repr(13, width=8))

print('242 bin:')
print(np.binary_repr(242, width=8))

print('\n10 left_shift 2:')
print(np.left_shift(10, 2))

print('\n40 right_shift 2:')
print(np.right_shift(40, 2))
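# Hedged note (not part of the original script): with standard NumPy the values
# printed above should be, in order:
#   0b1101 0b10001   # bin(13), bin(17)
#   1                # 13 & 17
#   29               # 13 | 17
#   [242]            # ~13 as uint8
#   00001101         # binary_repr(13, width=8)
#   11110010         # binary_repr(242, width=8)
#   40               # 10 << 2
#   10               # 40 >> 2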
def tobinarray(value): return np.array(list(np.binary_repr(value, width=36)), dtype=np.uint8)
def int2vec(x, dim=output_dim):
    out = np.zeros(dim)
    binrep = np.array(list(np.binary_repr(x))).astype('int')
    out[-len(binrep):] = binrep
    # print(out)
    return out
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image

img = Image.open('fractal.png').convert('L')
arr = np.array(img, dtype=np.uint8)
r, c = arr.shape
bitPlanes = np.ndarray((8, r, c), dtype=np.uint8)
for i in range(r):
    for j in range(c):
        binStr = np.binary_repr(arr[i, j], width=8)
        for k in range(8):
            bitPlanes[k, i, j] = int(binStr[k])
for i in range(8):
    plt.figure('bit plane-' + str(7 - i))
    plt.imshow(bitPlanes[i], cmap='gray')
plt.show()
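# Hedged aside (not part of the original): the per-pixel loop above can be
# vectorized with integer shifts; plane k here carries the same bit that
# binary_repr places at string index k (most significant bit first).
shifts = np.arange(7, -1, -1)[:, np.newaxis, np.newaxis]
planes = ((arr[np.newaxis, :, :] >> shifts) & 1).astype(np.uint8)
# planes.shape == (8, r, c) and planes[k] matches bitPlanes[k] built in the loop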
def activation(t):
    bins = np.array([bit == '1' for bit in np.binary_repr(pattern, width=num_inputs)])
    return 100.0 * (bins & (t <= t_stop))
def get_bit_rep(val, bit_width):
    val_bin = np.binary_repr(val, width=bit_width)
    val_arr = np.array(list(val_bin), dtype=np.uint8)
    return val_arr[::-1]
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit: # implement the Bernstein-Vazirani circuit zero = np.binary_repr(0, n) b = f(zero) # initial n + 1 bits input_qubit = QuantumRegister(n + 1, "qc") classicals = ClassicalRegister(n, "qm") prog = QuantumCircuit(input_qubit, classicals) # inverse last one (can be omitted if using O_f^\pm) prog.x(input_qubit[n]) # circuit begin prog.h(input_qubit[1]) # number=1 prog.h(input_qubit[2]) # number=38 prog.cz(input_qubit[0], input_qubit[2]) # number=39 prog.h(input_qubit[2]) # number=40 prog.cx(input_qubit[0], input_qubit[2]) # number=31 prog.h(input_qubit[2]) # number=42 prog.cz(input_qubit[0], input_qubit[2]) # number=43 prog.h(input_qubit[2]) # number=44 prog.x(input_qubit[2]) # number=36 prog.cx(input_qubit[0], input_qubit[2]) # number=37 prog.h(input_qubit[2]) # number=45 prog.cz(input_qubit[0], input_qubit[2]) # number=46 prog.h(input_qubit[2]) # number=47 prog.h(input_qubit[2]) # number=25 prog.cz(input_qubit[0], input_qubit[2]) # number=26 prog.h(input_qubit[2]) # number=27 prog.h(input_qubit[1]) # number=7 prog.cz(input_qubit[2], input_qubit[1]) # number=8 prog.rx(0.17592918860102857, input_qubit[2]) # number=34 prog.rx(-0.3989822670059037, input_qubit[1]) # number=30 prog.h(input_qubit[1]) # number=9 prog.h(input_qubit[1]) # number=18 prog.cz(input_qubit[2], input_qubit[1]) # number=19 prog.h(input_qubit[1]) # number=20 prog.y(input_qubit[1]) # number=14 prog.h(input_qubit[1]) # number=22 prog.cz(input_qubit[2], input_qubit[1]) # number=23 prog.h(input_qubit[1]) # number=24 prog.z(input_qubit[2]) # number=3 prog.z(input_qubit[1]) # number=41 prog.x(input_qubit[1]) # number=17 prog.y(input_qubit[2]) # number=5 prog.x(input_qubit[2]) # number=21 # apply H to get superposition for i in range(n): prog.h(input_qubit[i]) prog.h(input_qubit[n]) prog.barrier() # apply oracle O_f oracle = build_oracle(n, f) prog.append(oracle.to_gate(), [input_qubit[i] for i in range(n)] + [input_qubit[n]]) # apply H back (QFT on Z_2^n) for i in range(n): prog.h(input_qubit[i]) prog.barrier() # measure return prog
def bitstring(bits):
    return ''.join(str(int(b)) for b in bits)


if __name__ == '__main__':
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count, input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count = 2820

    info = cirq.final_state_vector(circuit)
    qubits = round(log2(len(info)))
    frequencies = {
        np.binary_repr(i, qubits): round((info[i] * (info[i].conjugate())).real, 3)
        for i in range(2**qubits)
    }

    writefile = open("../data/startCirq_Class909.csv", "w+")
    print(format(frequencies), file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit, file=writefile)
    writefile.close()
    return prog


if __name__ == '__main__':
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5, f)
    backend = BasicAer.get_backend('statevector_simulator')

    sample_shot = 7924
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    info = {
        np.binary_repr(i, qubits): round((info[i] * (info[i].conjugate())).real, 3)
        for i in range(2 ** qubits)
    }

    backend = FakeVigo()
    circuit1 = transpile(prog, backend, optimization_level=2)

    writefile = open("../data/startQiskit_Class1361.csv", "w")
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
#!/Users/zhiyang/anaconda3/bin/python3
###!/usr/local/bin/python3

import random
import numpy as np

# This works!
a = [int(i) for i in np.binary_repr(0b0111, 4)]
print("0b0111 is:", a, ".")
a = [int(i) for i in np.binary_repr(0b000000111, 9)]
print("0b000000111 is:", a, ".")
a = [int(i) for i in np.binary_repr(0b11010011, 4)]
print("0b11010011 is:", a, ".")
a = [int(i) for i in np.binary_repr(0b0100001, 7)]
print("0b0100001 is:", a, ".")
a = [int(i) for i in np.binary_repr(0b0001, 4)]
print("0b0001 is:", a, ".")
a = [int(i) for i in np.binary_repr(0b0000, 4)]
print("0b0000 is:", a, ".")
a = [int(i) for i in np.binary_repr(0b1001, 4)]
print("0b1001 is:", a, ".")
a = [int(i) for i in np.binary_repr(0b10000000000, 11)]
print("0b10000000000 is:", a, ".")

"""
print("--------------------------------------------------")
try:
    f = 0b834
except SyntaxError:
    print("A binary number, or integer in base 2, cannot contain numerical digits other than '0' and '1'.")
    print("Even if I try to catch the 'SyntaxError' exception, it will still throw/raise the 'SyntaxError' exception.")
"""
def check_binary_repr_0(self, level=rlevel):
    """Ticket #151"""
    assert_equal('0', N.binary_repr(0))
def __init__(self, start_state: int, end_state: int, total_target_qubits: int):
    """ Constructor """
    self.__start_state = np.binary_repr(start_state, width=total_target_qubits)
    self.__end_state = np.binary_repr(end_state, width=total_target_qubits)
def __init__(self, qvm, qubits, steps=1, init_betas=None, init_gammas=None, cost_ham=None, ref_ham=None, driver_ref=None, minimizer=None, minimizer_args=None, minimizer_kwargs=None, rand_seed=None, vqe_options=None, store_basis=False): """ QAOA object. Contains all information for running the QAOA algorthm to find the ground state of the list of cost clauses. N.B. This only works if all the terms in the cost Hamiltonian commute with each other. :param qvm: (Connection) The qvm connection to use for the algorithm. :param qubits: (list of ints) The number of qubits to use for the algorithm. :param steps: (int) The number of mixing and cost function steps to use. Default=1. :param init_betas: (list) Initial values for the beta parameters on the mixing terms. Default=None. :param init_gammas: (list) Initial values for the gamma parameters on the cost function. Default=None. :param cost_ham: list of clauses in the cost function. Must be PauliSum objects :param ref_ham: list of clauses in the mixer function. Must be PauliSum objects :param driver_ref: (pyQuil.quil.Program()) object to define state prep for the starting state of the QAOA algorithm. Defaults to tensor product of \|+> states. :param rand_seed: integer random seed for initial betas and gammas guess. :param minimizer: (Optional) Minimization function to pass to the Variational-Quantum-Eigensolver method :param minimizer_kwargs: (Optional) (dict) of optional arguments to pass to the minimizer. Default={}. :param minimizer_args: (Optional) (list) of additional arguments to pass to the minimizer. Default=[]. :param minimizer_args: (Optional) (list) of additional arguments to pass to the minimizer. Default=[]. :param vqe_options: (optinal) arguents for VQE run. :param store_basis: (optional) boolean flag for storing basis states. Default=False. """ # Seed the random number generator, if a seed is provided. if rand_seed is not None: np.random.seed(rand_seed) # Set attributes values, considering their defaults self.qvm = qvm self.steps = steps self.qubits = qubits self.nstates = 2**len(qubits) self.cost_ham = cost_ham or [] self.ref_ham = ref_ham or [] self.minimizer = minimizer or optimize.minimize self.minimizer_args = minimizer_args or [] self.minimizer_kwargs = minimizer_kwargs or { 'method': 'Nelder-Mead', 'options': { 'disp': True, 'ftol': 1.0e-2, 'xtol': 1.0e-2 } } self.betas = init_betas or np.random.uniform(0, np.pi, self.steps)[::-1] self.gammas = init_gammas or np.random.uniform(0, 2 * np.pi, self.steps) self.vqe_options = vqe_options or {} self.ref_state_prep = (driver_ref or pq.Program([H(i) for i in self.qubits])) if store_basis: self.states = [ np.binary_repr(i, width=len(self.qubits)) for i in range(self.nstates) ] # Check argument types if not isinstance(self.cost_ham, (list, tuple)): raise TypeError("cost_ham must be a list of PauliSum objects.") if not all([isinstance(x, PauliSum) for x in self.cost_ham]): raise TypeError("cost_ham must be a list of PauliSum objects") if not isinstance(self.ref_ham, (list, tuple)): raise TypeError("ref_ham must be a list of PauliSum objects") if not all([isinstance(x, PauliSum) for x in self.ref_ham]): raise TypeError("ref_ham must be a list of PauliSum objects") if not isinstance(self.ref_state_prep, pq.Program): raise TypeError("Please provide a pyQuil Program object " "to generate initial state.")
def next_move(self, sensors): # Indicate that we are at the start of a new move move_complete = False ## Check to see if we have reached the goal or not if self.location == self.goal: self.found_goal = True # Update the wall_map array self.look_for_walls(self.heading, sensors) # Keep track of the cells we know nothing about no_knowledge = [] # Initialize an array to keep track of the cells that we know something about info_on = [[0 for a in range(self.dimensions)] for b in range(self.dimensions)] # If we have visited or updated the walls of a cell, record that we know something about that cell for a in range(self.dimensions): for b in range(self.dimensions): if int(self.visited[a][b]) == 1 or int( self.updated_the_walls[a][b]) == 1: info_on[a][b] += 1 # If we have not found the goal, perform the flood fill algorithm if self.found_goal == False: self.flood_algorithm(self.goal[0], self.goal[1]) # If we have found the goal, block all cell that we know nothing about elif self.found_goal == True and self.ready_to_reset == False: for a in range(self.dimensions): for b in range(self.dimensions): if info_on[a][b] == 0: # Keep track of the cells we know nothing about no_knowledge.append([a, b]) if len(no_knowledge) > 0: for item in no_knowledge: # Block off all cells we have no knowledge about self.wall_map[item[0]][item[1]] = 0 # Now add adjacent walls in the adjacent cells for a in range(len(self.wall_map)): for b in range(len(self.wall_map)): # If we haven't been there (if we had, we would know all about it and the walls would be there already) if self.visited[a][b] == 0: if self.isxy_inmaze([a - 1, b]): # Add the appropriate wall if the appropriate wall doesnt already exist if int( np.binary_repr(self.wall_map[a - 1][b], width=4)[2]) == 1: self.wall_map[a - 1][b] -= 2 if self.isxy_inmaze([a, b - 1]): if int( np.binary_repr(self.wall_map[a][b - 1], width=4)[3]) == 1: self.wall_map[a][b - 1] -= 1 if self.isxy_inmaze([a + 1, b]): if int( np.binary_repr(self.wall_map[a + 1][b], width=4)[0]) == 1: self.wall_map[a + 1][b] -= 8 if self.isxy_inmaze([a, b + 1]): if int( np.binary_repr(self.wall_map[a][b + 1], width=4)[1]) == 1: self.wall_map[a][b + 1] -= 4 ready_to_reset = True # Perform the necessary task to reset the robot if self.first_run_complete == False: print "Reset in Progress" rotation = 'Reset' movement = 'Reset' # Reset Robot the_move = [0, 0] self.heading = 'up' print "Final distances:" for item in self.dist_to_g: print item print "Final wall map:" for item in self.wall_map: print item print "The_move set" # Reset distances to goal self.flood_algorithm(self.goal[0], self.goal[1]) # Raise flag to indicate that the first run is complete self.first_run_complete = True ###################################################################################### # This next block of code controls the movement implementation for the EXPLORATORY RUN # Keep track of the potential moves move_list = [] # Keep track of the distance to goal for the cells reached by the potential moves dist_list = [] # Only do this if we haven't already completed the first run if self.first_run_complete == False: # Convert wall number in current location to a binary number binary = np.binary_repr( self.wall_map[self.location[0]][self.location[1]], width=4) # Go through each direction for a in range(4): # If no wall exists in that direction if int(binary[a]) == 1: # Save the cell one away in that direction as a possible next move new_loc = [ self.location[0] + self.moves[a][0], self.location[1] + self.moves[a][1] ] # Check if 
the possible move exists if self.isxy_inmaze(new_loc): # Save possible cells we can move to move_list.append([new_loc[0], new_loc[1]]) # Keep track of the distance to goal of these possible cells dist_list.append( self.dist_to_g[new_loc[0]][new_loc[1]]) if len(move_list) > 0: # Pick the move that yields the smallest distance to goal chosen_move = move_list[np.argmin(dist_list)] # Determine the change along each direction diff_x = chosen_move[0] - self.location[0] diff_y = chosen_move[1] - self.location[1] # Determine rotation and movement based on the chosen next cell if self.heading == 'up': if diff_x == 0 and diff_y == 1: rotation = 0 movement = 1 if diff_x == 1 and diff_y == 0: rotation = 90 movement = 1 self.heading = 'right' if diff_x == 0 and diff_y == -1: rotation = 0 movement = -1 self.heading = 'up' if diff_x == -1 and diff_y == 0: rotation = -90 movement = 1 self.heading = 'left' elif self.heading == 'down': if diff_x == 0 and diff_y == 1: rotation = 0 movement = -1 self.heading = 'down' if diff_x == 1 and diff_y == 0: rotation = -90 movement = 1 self.heading = 'right' if diff_x == 0 and diff_y == -1: rotation = 0 movement = 1 if diff_x == -1 and diff_y == 0: rotation = 90 movement = 1 self.heading = 'left' elif self.heading == 'left': if diff_x == 0 and diff_y == 1: rotation = 90 movement = 1 self.heading = 'up' if diff_x == 1 and diff_y == 0: rotation = 0 movement = -1 self.heading = 'left' if diff_x == 0 and diff_y == -1: rotation = -90 movement = 1 self.heading = 'down' if diff_x == -1 and diff_y == 0: rotation = 0 movement = 1 elif self.heading == 'right': if diff_x == 0 and diff_y == 1: rotation = -90 movement = 1 self.heading = 'up' if diff_x == 1 and diff_y == 0: rotation = 0 movement = 1 if diff_x == 0 and diff_y == -1: rotation = 90 movement = 1 self.heading = 'down' if diff_x == -1 and diff_y == 0: rotation = 0 movement = -1 self.heading = 'right' ################################################################################# # This next block of code controls the movement implementation for the SECOND RUN # Only perform this part if the first run is complete if self.first_run_complete == True: # Define possible moves for each heading moves = { 'up': [ [-1, 0], [0, 1], [1, 0], ], 'down': [[1, 0], [0, -1], [-1, 0]], 'left': [ [0, -1], [-1, 0], [0, 1], ], 'right': [ [0, 1], [1, 0], [0, -1], ], } # Keep track of the potential cells to move to potential_move_list = [] # If we see a distance of more than 3, change the distance to 3 since we can only move 3 anyways for i in range(len(sensors)): if sensors[i] > 3: sensors[i] = 3 # Iterate through the sensor directions 0,1 and 2 for i in range(3): # Set a flag to indicate we haven't yet found a possible move in that direction found = False # Iterate from sensor reading to 0 for a in reversed(range(0, sensors[i] + 1)): # Only consider directions in which the sensor does not show 0 if sensors[i] != 0: # Check to see if we have found a possible move in that direction if found == False: # Save the cell that is (a) moves away in the (i) direction check_point = [ self.location[0] + a * moves[self.heading][i][0], self.location[1] + a * moves[self.heading][i][1] ] # Check if every step brings us closer and check to make sure the point is in the maze # Do not consider self.moves with a distance of 0 if a != 0: if self.dist_to_g[check_point[0]][ check_point[1]] + a == self.dist_to_g[ self.location[0]][self.location[ 1]] and self.isxy_inmaze([ check_point[0], check_point[1] ]) == True: # Add the move to the list of possible self.moves 
potential_move_list.append( [check_point[0], check_point[1], a]) # Flag to say that we found a move in that direction so that we can move on to looking at the next direction found = True if len(potential_move_list) > 0 and self.location != self.goal: # Sort the potential_move_list starting with the moves with the lowest distance potential_move_list.sort(key=lambda x: int(x[2])) # Reverse the list potential_move_list.reverse() # Pick a move with the highest distance the_move = potential_move_list.pop(0) # Now we need to return the rotation and movement based on the move selected # Calculate the distance to move in each direction dx = the_move[0] - self.location[0] dy = the_move[1] - self.location[1] # Set default rotation rotation = 0 if self.heading == 'up': if dx == 0 and dy > 0: movement = the_move[2] elif dx > 0 and dy == 0: rotation = 90 movement = the_move[2] self.heading = 'right' elif dx < 0 and dy == 0: rotation = -90 movement = the_move[2] self.heading = 'left' elif self.heading == 'down': if dx > 0 and dy == 0: rotation = -90 movement = the_move[2] self.heading = 'right' elif dx == 0 and dy < 0: movement = the_move[2] elif dx < 0 and dy == 0: rotation = 90 movement = the_move[2] self.heading = 'left' elif self.heading == 'left': if dx == 0 and dy > 0: rotation = 90 movement = the_move[2] self.heading = 'up' elif dx == 0 and dy < 0: rotation = -90 movement = the_move[2] self.heading = 'down' elif dx < 0 and dy == 0: movement = the_move[2] elif self.heading == 'right': if dx == 0 and dy > 0: rotation = -90 movement = the_move[2] self.heading = 'up' elif dx > 0 and dy == 0: movement = the_move[2] elif dx == 0 and dy < 0: rotation = 90 movement = the_move[2] self.heading = 'down' # This is where we actually make the robot move if self.first_run_complete == False: self.location = chosen_move else: self.location = the_move # Record that we have visited the current location self.visited[self.location[0]][self.location[1]] = 1 # Return the movement and rotation specifications return rotation, movement
np.roll(A, (1, 2))
np.roll(B, 1)
np.rollaxis(A, 0, 1)
np.moveaxis(A, 0, 1)
np.moveaxis(A, (0, 1), (1, 2))
np.cross(B, A)
np.cross(A, A)
np.indices([0, 1, 2])
np.indices([0, 1, 2], sparse=False)
np.indices([0, 1, 2], sparse=True)
np.binary_repr(1)
np.base_repr(1)
np.allclose(i8, A)
np.allclose(B, A)
np.allclose(A, A)
np.isclose(i8, A)
np.isclose(B, A)
np.isclose(A, A)
np.array_equal(i8, A)
np.array_equal(B, A)
np.array_equal(A, A)
def look_for_walls(self, direction, sense): price = { 'up': [8, 1, 2], 'down': [2, 4, 8], 'left': [4, 8, 1], 'right': [1, 2, 4] } h = { 'up': [-1, 0, 1, 0, 1, 0, 'down', 1, 0], 'down': [1, 0, -1, 0, -1, 0, 'up', -1, 0], 'left': [0, -1, 0, -1, 0, 1, 'right', 0, 1], 'right': [0, 1, 0, 1, 0, -1, 'left', 0, -1] } index = { 'up': [0, 3, 2], 'down': [2, 1, 0], 'left': [1, 0, 3], 'right': [3, 2, 1] } # Save location of the grid cell in which a wall is being sensed location_left_sensor = [ self.location[0] + h[direction][0] * sense[0], self.location[1] + h[direction][3] * sense[0] ] location_forward_sensor = [ self.location[0] + h[direction][1] * sense[1], self.location[1] + h[direction][4] * sense[1] ] location_right_sensor = [ self.location[0] + h[direction][2] * sense[2], self.location[1] + h[direction][5] * sense[2] ] # Save the wall_map value of the grid cells being sensed left_sensed_wall = self.wall_map[self.location[0] + h[direction][0] * sense[0]][self.location[1] + h[direction][3] * sense[0]] forward_sensed_wall = self.wall_map[self.location[0] + h[direction][1] * sense[1]][ self.location[1] + h[direction][4] * sense[1]] right_sensed_wall = self.wall_map[self.location[0] + h[direction][2] * sense[2]][self.location[1] + h[direction][5] * sense[2]] # Save the location of the grid cell next to the one we are sensing in the direction of sensing next_left = [ self.location[0] + h[direction][0] * sense[0] - h[direction][7], self.location[1] + h[direction][3] * sense[0] - h[direction][8] ] next_forwards = [ self.location[0] + h[direction][1] * sense[1] - h[direction][8], self.location[1] + h[direction][4] * sense[1] + h[direction][7] ] next_right = [ self.location[0] + h[direction][2] * sense[2] + h[direction][7], self.location[1] + h[direction][5] * sense[2] + h[direction][8] ] # Here, we update the wall that we sense to our left # First convert the wall number to binary and check if there is a wall in the appropriate position already if int(np.binary_repr(left_sensed_wall, width=4)[index[direction][0]]) == 1: # If no wall, adjust the wall number to add the appropriate wall self.wall_map[location_left_sensor[0]][ location_left_sensor[1]] -= price[direction][0] # Keep track of the fact we have updated the cell self.updated_the_walls[location_left_sensor[0]][ location_left_sensor[1]] = 1 # Check to see if the next cell in the direction of sensing actually exists if self.isxy_inmaze(next_left): # Add the appropriate wall (opposite compared to above) self.wall_map[next_left[0]][next_left[1]] -= price[h[direction] [6]][0] # Record that we know something about that cell self.updated_the_walls[next_left[0]][next_left[1]] = 1 # Here, we update the wall that we sense in front if int( np.binary_repr(forward_sensed_wall, width=4)[index[direction][1]]) == 1: self.wall_map[location_forward_sensor[0]][ location_forward_sensor[1]] -= price[direction][1] self.updated_the_walls[location_forward_sensor[0]][ location_forward_sensor[1]] = 1 if self.isxy_inmaze(next_forwards): self.wall_map[next_forwards[0]][next_forwards[1]] -= price[ h[direction][6]][1] self.updated_the_walls[next_forwards[0]][next_forwards[1]] = 1 # Here, we update the wall that we sense to the right if int( np.binary_repr(right_sensed_wall, width=4)[index[direction][2]]) == 1: self.wall_map[location_right_sensor[0]][ location_right_sensor[1]] -= price[direction][2] self.updated_the_walls[location_right_sensor[0]][ location_right_sensor[1]] = 1 if self.isxy_inmaze(next_right): self.wall_map[next_right[0]][next_right[1]] -= price[ 
h[direction][6]][2] self.updated_the_walls[next_right[0]][next_right[1]] = 1
def to_binary(n): return [bool(int(x)) for x in np.binary_repr(n, speaker_classes.size)][::-1]
def binary_repr(num, width=None):
    """Return the binary representation of the input number as a string.

    .. seealso:: :func:`numpy.binary_repr`
    """
    return _numpy.binary_repr(num, width)
def get_output(self, quantum_instance, params=None, shots=None): """ Get classical data samples from the generator. Running the quantum generator circuit results in a quantum state. To train this generator with a classical discriminator, we need to sample classical outputs by measuring the quantum state and mapping them to feature space defined by the training data. Args: quantum_instance (QuantumInstance): Quantum Instance, used to run the generator circuit. params (numpy.ndarray): array or None, parameters which should be used to run the generator, if None use self._params shots (int): if not None use a number of shots that is different from the number set in quantum_instance Returns: list: generated samples, array: sample occurrence in percentage """ instance_shots = quantum_instance.run_config.shots q = QuantumRegister(sum(self._num_qubits), name='q') qc = QuantumCircuit(q) if params is None: params = self._bound_parameters qc.append(self.construct_circuit(params), q) if quantum_instance.is_statevector: pass else: c = ClassicalRegister(sum(self._num_qubits), name='c') qc.add_register(c) qc.measure(q, c) if shots is not None: quantum_instance.set_config(shots=shots) result = quantum_instance.execute(qc) generated_samples = [] if quantum_instance.is_statevector: result = result.get_statevector(qc) values = np.multiply(result, np.conj(result)) values = list(values.real) keys = [] for j in range(len(values)): keys.append(np.binary_repr(j, int(sum(self._num_qubits)))) else: result = result.get_counts(qc) keys = list(result) values = list(result.values()) values = [float(v) / np.sum(values) for v in values] generated_samples_weights = values for i, _ in enumerate(keys): index = 0 temp = [] for k, p in enumerate(self._num_qubits): bin_rep = 0 j = 0 while j < p: bin_rep += int(keys[i][index]) * 2 ** (int(p) - j - 1) j += 1 index += 1 if len(self._num_qubits) > 1: temp.append(self._data_grid[k][int(bin_rep)]) else: temp.append(self._data_grid[int(bin_rep)]) generated_samples.append(temp) # self.generator_circuit._probabilities = generated_samples_weights if shots is not None: # Restore the initial quantum_instance configuration quantum_instance.set_config(shots=instance_shots) return generated_samples, generated_samples_weights
n = 7              ## dimension of input data, user-defined
m = 2 ** n         ## number of data points
m_2 = 2 ** (n - 1)
m_3 = 2 ** (n - 2)
layer_num = 3      ## number of layers of the neural network, user-defined
neu = 40           ## neurons per layer
epochs = 50        ## training time
mean = 0.0         ## mean of initialization
scale = 1.0        ## var of initialization

## data: 128 rows of 7 bits
data = np.zeros([2 ** n, n], dtype=np.float32)
for i in range(2 ** n):
    bin = np.binary_repr(i, n)
    a = np.array(list(bin), dtype=int)
    data[i, :] = a
data = torch.from_numpy(data)

## generate training set and inference set
XTrain = torch.zeros(m_2, n)
XTest = torch.zeros(m_2, n)
for i in range(m_2):
    XTrain[i, :] = data[i, :]
    XTest[i, :] = data[i + m_2, :]

## choose target, need to choose targets of different LVC
targets, YTrains, YTests, TLVS = [], [], [], []
## target of LVC: 7
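## Hedged aside (not part of the original): for n <= 8 the enumeration loop above
## can also be written with np.unpackbits; the column order is MSB-first, which
## matches np.binary_repr(i, n).
codes = np.arange(2 ** n, dtype=np.uint8).reshape(-1, 1)
data_alt = np.unpackbits(codes, axis=1)[:, 8 - n:].astype(np.float32)
## data_alt holds the same values as the 'data' array built in the loop above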
def int2bool(x, width=None):
    """function for decimal to binary"""
    if not width:
        return [int(x) for x in reversed(list(np.binary_repr(x)))]
    else:
        return [int(x) for x in reversed(list(np.binary_repr(x, width)))]
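# Hedged usage sketch (not part of the original): bits come back
# least-significant first, zero-padded when a width is given.
assert int2bool(6) == [0, 1, 1] and int2bool(6, 5) == [0, 1, 1, 0, 0]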
def bitarray(num, bits): return np.array(list(np.binary_repr(num).zfill(bits))).astype(np.int8)
def test_binary_repr_0_width(self, level=rlevel):
    assert_equal(np.binary_repr(0, width=3), '000')
def test_binary_repr_0(self, level=rlevel):
    """Ticket #151"""
    assert_equal('0', np.binary_repr(0))
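# Hedged aside (not part of the original tests): with a width, binary_repr returns
# the two's-complement form for negative inputs; without one it keeps the sign.
# np.binary_repr(-3)          -> '-11'
# np.binary_repr(-3, width=4) -> '1101'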
def powerset(n):
    powerset = []
    for i in range(1 << n):
        powerset.append(tuple([int(_) for _ in np.binary_repr(i, width=n)]))
    return powerset
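# Hedged usage sketch (not part of the original): each tuple is the indicator
# vector of one subset of an n-element set, giving 2**n tuples in total.
assert powerset(2) == [(0, 0), (0, 1), (1, 0), (1, 1)]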
import numpy as np

d = 5
P = 1
D = 100

numbers = np.random.random_integers(0, 2**d - 1, size=[4])
vertices = [(np.binary_repr(n, width=d)) for n in numbers]
vertices_array = np.array([list(v) for v in vertices]).astype(np.uint8)

data_X = np.zeros(shape=(P, d * D))
data_Y = np.zeros(shape=(P, 1))

for p in range(P):
    ind = np.random.random_integers(0, 3)
    # print(ind)
    if ind < 2:
        v = 1
    else:
        v = 0
    # print(v)
    data_Y[p, 0] = v
    u_base = vertices_array[ind]
    # print(u_base)
    u1 = np.repeat(u_base[:, np.newaxis], int(0.05 * D), axis=1)
    u1 = u1 + np.random.normal(loc=0, scale=1.0, size=np.shape(u1))
    # print(u1)
    u2_const = np.random.uniform(low=-1,
def get_ind_from_key(self, key, dim="r"): ind = [0, 0, 0] br = np.binary_repr(key, width=self.level * 3) for dim in range(3): ind[dim] = int(br[self.dim_slices[dim]], 2) return ind
def num2bit(state, L): return np.binary_repr(state, L)
# Creating registers with n qubits
n = 16  # for a local backend n can go up to about 23; beyond that it raises a MemoryError
qr = QuantumRegister(n, name='qr')
cr = ClassicalRegister(n, name='cr')

# Quantum circuit for alice state
alice = QuantumCircuit(qr, cr, name='Alice')

# Generate a random number in the range of available qubits [0, 65536)
alice_key = np.random.randint(0, high=2**n)

# Cast key to binary for encoding
# range: key[0]-key[15] with key[15] least significant figure
alice_key = np.binary_repr(alice_key, n)  # n is the width

# Encode key as alice qubits
# IBM's qubits are all set to |0> initially
for index, digit in enumerate(alice_key):
    if digit == '1':
        alice.x(qr[index])  # if key has a '1', change state to |1>

# Switch randomly about half qubits to diagonal basis
alice_table = []  # Create empty basis table
for index in range(len(qr)):  # BUG: enumerate(q) raises an out of range error
    if 0.5 < np.random.random():    # With 50% chance...
        alice.h(qr[index])          # ...change to diagonal basis
        alice_table.append('X')     # character for diagonal basis
    else:
        alice_table.append('Z')     # character for computational basis
def get_slice_key(self, ind, dim="r"): slb = np.binary_repr(ind, width=self.level) expanded = np.array([0] * self.level * 3, dtype="c") expanded[self.dim_slices[dim]] = slb return int(expanded.tostring(), 2)