def find_bezier_t_intersecting_with_closedpath(bezier_point_at_t, inside_closedpath, t0=0., t1=1., tolerence=0.01):
    """
    Bisect the Bezier parameter interval [t0, t1] down to a pair (t0, t1)
    bracketing the curve's intersection with a closed path.

    - bezier_point_at_t: function returning the (x, y) point at parameter *t*
    - inside_closedpath: predicate, True when a point lies inside the path

    Exactly one endpoint must lie inside the path (bisection needs the
    interval to straddle the boundary), otherwise ValueError is raised.
    The search stops once the two bracketing points are closer than
    *tolerence*; the bracketing parameters (t0, t1) are returned.
    """
    p_lo = bezier_point_at_t(t0)
    p_hi = bezier_point_at_t(t1)
    in_lo = inside_closedpath(p_lo)
    in_hi = inside_closedpath(p_hi)

    if not xor(in_lo, in_hi):
        raise ValueError("the segment does not seemed to intersect with the path")

    while True:
        # terminate once the bracketing *points* are within tolerance
        dx = p_lo[0] - p_hi[0]
        dy = p_lo[1] - p_hi[1]
        if dx * dx + dy * dy < tolerence ** 2:
            return t0, t1

        t_mid = 0.5 * (t0 + t1)
        p_mid = bezier_point_at_t(t_mid)
        in_mid = inside_closedpath(p_mid)

        if xor(in_lo, in_mid):
            # boundary is in the lower half; shrink from above
            t1, p_hi, in_hi = t_mid, p_mid, in_mid
        else:
            # boundary is in the upper half; shrink from below
            t0, p_lo, in_lo = t_mid, p_mid, in_mid
def solution059():
    # Project Euler 59: brute-force the 3-letter lowercase XOR key over the
    # module-level `coded_message` (a list of ints) and score each candidate
    # by how much of the decoded text looks like plain English.
    # (Python 2: uses print statements.)
    max_decoded = 0
    for a in range(97,123):          # candidate key bytes: 'a'..'z'
        for b in range(97,123):
            for c in range(97,123):
                password = [a,b,c]
                decoded_char_count = 0
                decoded_message = []
                for char in range(len(coded_message)):
                    cycle = char%3   # the 3-byte key repeats
                    decoded_char = operator.xor(password[cycle],coded_message[char])
                    # count bytes decoding to letters or a space
                    # NOTE(review): range(65,90) excludes 'Z' (90); probably
                    # meant range(65,91)
                    if (decoded_char in range(65,90)) or (decoded_char in range(97,123)) or (decoded_char == 32):
                        decoded_char_count += 1
                    decoded_message.append(decoded_char)
                percent_decoded = decoded_char_count*1.0/len(decoded_message)
                if percent_decoded > max_decoded:
                    print str(chr(a)) + str(chr(b)) + str(chr(c)), percent_decoded
                    max_decoded = percent_decoded
                if percent_decoded > 0.95:
                    # confident match: report the key and return the puzzle
                    # answer (sum of the decoded ASCII values)
                    print "password: ", str(chr(a)) + str(chr(b)) + str(chr(c))
                    ascii_sum = 0
                    for char in range(len(coded_message)):
                        cycle = char%3
                        decoded_char = operator.xor(password[cycle],coded_message[char])
                        ascii_sum += decoded_char
                    return ascii_sum
def getNum(bitList, n, k):
    """
    Decode *bitList* (length *n*) back into its number list.

    The first min(n, k) output bits are each the XOR of adjacent input
    bits; subsequent bits fold in an earlier output bit and the input bit
    *k* positions back (a sliding XOR window of width k).
    """
    decoded = [bitList[0]]
    for idx in range(1, min(n, k)):
        decoded.append(xor(bitList[idx], bitList[idx - 1]))

    pos = 0
    cur = decoded[pos]
    prev = bitList[k - 1]
    for j in range(k, n):
        running = xor(cur, prev)
        decoded.append(xor(running, bitList[j]))
        # advance the window: next earlier-output bit and input bit
        pos += 1
        cur = decoded[pos]
        prev = bitList[j]
    return decoded
def analyzeMap(self, isRed, legalpositions, layout):
    """
    Precompute map geometry for a capture-style Pacman agent.

    Splits *legalpositions* into our side / their side of the board
    relative to *isRed*, builds maze-distance tables for the full layout
    and for a copy walled off at the border, and returns the list of
    our-side border positions (cells adjacent to the opponents' side).
    """
    self.width = max([p[0] for p in legalpositions])
    otherwidth = layout.width  # NOTE(review): unused
    self.halfway = self.width/2
    # xor flips the "> halfway" test depending on team colour, so the same
    # expression selects the correct half for either team
    self.ourside = [p for p in legalpositions if operator.xor(p[0]>self.halfway, isRed)]
    self.theirside = [p for p in legalpositions if not operator.xor(p[0]>self.halfway, isRed)]
    # wall off the first enemy column so distances computed on this copy
    # never route through enemy territory
    ourSideLayout = layout.deepCopy()
    x=-1
    if(isRed):
        x=1
    sideToWallOff = self.halfway + x
    for i in range(layout.height):
        ourSideLayout.walls[sideToWallOff][i]=True
    self.distancer = distanceCalculator.Distancer(layout)
    self.distancer.getMazeDistances()
    self.ourSideDistancer = distanceCalculator.Distancer(ourSideLayout)
    self.ourSideDistancer.getMazeDistances()
    # border cells: our-side positions with a their-side neighbour at
    # Manhattan distance 1
    ourborder = []
    for pos in self.ourside:
        if len([p for p in self.theirside if util.manhattanDistance(p, pos)==1]):
            ourborder.append(pos)
    return ourborder
def keysGenerator(key0):
    """
    Expand *key0* into the list of three round keys [key0, key1, key2].

    *key0* is converted to a 4x4 matrix; each subsequent round key is
    derived from the previous one with a round constant and the SboxE
    substitution of its last row (AES-like key schedule).  The two
    previously duplicated derivation passages are factored into
    _nextRoundKey.
    """
    key0InMatrix = toMatrix(key0, 4)
    keys = [key0InMatrix]
    # round constants for rounds 1 and 2
    key1 = _nextRoundKey(key0InMatrix, [0, 0, 0, 1])
    keys.append(key1)
    key2 = _nextRoundKey(key1, [0, 0, 1, 0])
    keys.append(key2)
    return keys


def _nextRoundKey(prevKey, poly):
    """Derive one round key from *prevKey* using round constant *poly*."""
    nextKey = [[], [], [], []]
    substituted = SboxE[tuple(prevKey[3])]  # S-box of the previous last row
    for i in range(4):
        # row 0: poly XOR previous row 0 XOR S-box(previous row 3)
        nextKey[0].append(xor(xor(poly[i], prevKey[0][i]), substituted[i]))
    # remaining rows chain XORs in the original order: 2, then 1, then 3
    for i in range(4):
        nextKey[2].append(xor(prevKey[2][i], nextKey[0][i]))
    for i in range(4):
        nextKey[1].append(xor(prevKey[1][i], nextKey[2][i]))
    for i in range(4):
        nextKey[3].append(xor(prevKey[3][i], nextKey[1][i]))
    return nextKey
def unobfuscate(data):
    """ Unobfuscate a xor obfuscated string """
    out = ""
    x=len(data) - 1
    import operator
    # walk the obfuscated string backwards: each plain character is the XOR
    # of a byte with its predecessor
    while x>1:
        apos=data[x]
        aprevpos=data[x-1]
        epos=chr(operator.xor(ord(apos), ord(aprevpos)))
        out = "".join((out, epos))
        x -= 1
    # the loop accumulated characters last-to-first; reverse them back
    # (Python 2: relies on the builtin reduce)
    out=str(reduce(lambda x, y: y + x, out))
    # positions 1 and 0 are recovered specially: data[1] was XORed with data[0]
    e2, a2 = data[1], data[0]
    a1=chr(operator.xor(ord(a2), ord(e2)))
    a1 = "".join((a1, out))
    out = a1
    # NOTE(review): here a1 is rebound to out[0] (the byte just prepended)
    # while e1 is the raw data[0] -- verify against the matching obfuscate()
    # routine; the variable naming suggests this may not be intentional
    e1,a1=out[0], data[0]
    a0=chr(operator.xor(ord(a1), ord(e1)))
    a0 = "".join((a0, out))
    out = a0
    return out
def embedSecret(self, text):
    """
    Embed *text* into the image using least-significant-bit steganography.

    Writes the bits of *text* followed by a fixed 16-bit delimiter into the
    LSBs of the image's colour channels (row-major, 3 channels per pixel)
    and returns the modified image.
    """
    bits = []
    bits = text_to_bits(text)
    # 16-bit end-of-message marker
    delimiter = [0,0,1,0,1,1,1,0,0,0,1,0,0,0,0,1]
    #text= ".!"
    b = 0  # index of the next message bit to embed
    d = 0  # index of the next delimiter bit to embed
    for i in range(self.rows):
        for j in range(self.cols):
            pixel = self.image[i,j]
            for k in range(0, 3):
                if len(bits) + len(delimiter) == b + d:
                    # all bits embedded
                    # NOTE(review): this break only exits the channel loop;
                    # the row/column loops keep scanning (harmlessly)
                    break
                if b == len(bits) and d < len(delimiter):
                    # message done: embed delimiter bits.  Force the channel
                    # LSB to 0, then XOR in the data bit.
                    if pixel[k]%2 != 0 and delimiter[d] == 0:
                        self.image[i,j][k] = self.image[i,j][k] - 1
                    if pixel[k]%2 == 0:
                        self.image[i,j][k] = xor(self.image[i,j][k], delimiter[d])
                    d += 1
                else:
                    # same LSB write, but with a message bit
                    if pixel[k]%2 != 0 and bits[b] == 0:
                        self.image[i,j][k] = self.image[i,j][k] - 1
                    if pixel[k]%2 == 0:
                        self.image[i,j][k] = xor(self.image[i,j][k], bits[b])
                    b += 1
    return self.image
def SieveOfAtkins(limit):
    """
    Return the list of primes below *limit* using the Sieve of Atkin.

    Fixes vs. the original: the flip array is sized limit+1, so candidates
    with n == limit no longer raise IndexError, and the quadratic-form
    loops and square sieve run up to int(sqrt(limit)) inclusive
    (range(1, root) silently skipped x == root).
    """
    primes = [2, 3]
    isPrime = [False] * (limit + 1)
    root = int(math.sqrt(limit))
    for x in range(1, root + 1):
        for y in range(1, root + 1):
            # candidates are prime iff flipped an odd number of times
            an = (4 * x * x) + (y * y)
            if an <= limit and (an % 12 == 1 or an % 12 == 5):
                isPrime[an] = operator.xor(isPrime[an], True)
            bn = (3 * x * x) + (y * y)
            if bn <= limit and bn % 12 == 7:
                isPrime[bn] = operator.xor(isPrime[bn], True)
            cn = (3 * x * x) - (y * y)
            if x > y and cn <= limit and cn % 12 == 11:
                isPrime[cn] = operator.xor(isPrime[cn], True)
    # eliminate remaining composites: multiples of squares of found primes
    for z in range(5, root + 1):
        if isPrime[z]:
            s = z * z
            for a in range(s, limit, s):
                isPrime[a] = False
    for b in range(5, limit, 2):
        if isPrime[b]:
            primes.append(b)
    return primes
def getaccess(mask, ditdah, salt, now):
    """
    Obfuscate *mask*: each character is XORed with one character of
    *ditdah* (spaces stripped) and one character of a key string, both
    cycled independently.

    On the hour or half hour (*now* ending in "00" or "30") the key string
    is now+salt, otherwise just salt.  Returns the obfuscated string.
    """
    ditdah = ditdah.replace(" ", "")
    if now[-2:] == "00" or now[-2:] == "30":
        keystr = str(now) + str(salt)
    else:
        keystr = str(salt)

    pieces = []
    dd = 0  # cycling index into ditdah
    tt = 0  # cycling index into keystr
    for ch in mask:
        if dd >= len(ditdah):
            dd = 0
        if tt >= len(keystr):
            tt = 0
        mixed = xor(xor(ord(ch), ord(ditdah[dd:dd+1])), ord(keystr[tt:tt+1]))
        pieces.append(chr(mixed))
        dd += 1
        tt += 1
    return "".join(pieces)
def __setPWD__(self, versionheader):
    # Decode the database password embedded in the MDB file header into
    # self.dbpwd.  The header layout differs between Jet3 (Access 97) and
    # Jet4+ (Access 2000 and later).
    if self.dbversion == MDBDefinitionMarkers.VJET3:
        # Jet3: each password byte is XORed with a fixed default-password byte
        mdbPwdField = versionheader[MDBDefinitionMarkers.PWDOFFSET : MDBDefinitionMarkers.PWDOFFSET + MDBDefinitionMarkers.JET3PWDLEN]
        i = 0
        for x in MDBDefinitionMarkers.mdb97pwd:
            self.dbpwd = self.dbpwd + chr(xor(x, ord(mdbPwdField[i])))
            i+=1
    elif self.dbversion >= MDBDefinitionMarkers.VJET4:
        mdbPwdField = versionheader[MDBDefinitionMarkers.PWDOFFSET : MDBDefinitionMarkers.PWDOFFSET + MDBDefinitionMarkers.JET4PWDLEN]
        # per-database key stored in the header (little-endian short)
        mdb2000key = struct.unpack('<H', versionheader[MDBDefinitionMarkers.PWDKEYOFFSET : MDBDefinitionMarkers.PWDKEYOFFSET + MDBDefinitionMarkers.PWDKEYLEN])[0]
        mdbKey = xor(MDBDefinitionMarkers.mdb2000xormask, mdb2000key)
        # Convert password field to little endian shorts
        i = 0
        pwdlist = []
        while i < len(mdbPwdField):
            var = mdbPwdField[i:i+2]
            pwdlist.append(struct.unpack('<H', var)[0])  # also like var[:2][::-1] syntax
            i+=2
        # XOR Password field with default password field and then further XOR w/
        # mask which varies with each database (db creation date as short?)
        i = 0
        for defaultVal in MDBDefinitionMarkers.mdb2000pwd:
            val = xor(defaultVal, pwdlist[i])
            if val < 256:
                self.dbpwd = self.dbpwd + chr(val)
            else:
                # out of byte range: apply the per-database key as well
                self.dbpwd = self.dbpwd + chr(xor(val, mdbKey))
            i+=1
    # a leading NUL byte means the database has no password
    if binascii.hexlify(self.dbpwd[0]) == '00':
        self.dbpwd = "Null"
def update_crc(c, s):
    """Fold every byte of string *s* into CRC accumulator *c*; return the result."""
    for ch in s:
        # table-driven CRC step: low byte indexes CRC_TABLE, high bits shift down
        c = operator.xor(CRC_TABLE[operator.xor(c, ord(ch)) & 255], c >> 8)
    return c
def singleNumber(self, A):
    """
    Return the element of non-empty list *A* that appears an odd number of
    times when every other element appears exactly twice (XOR cancels the
    pairs).

    Uses functools.reduce instead of the index loop; unlike the original,
    this also works for a single-element list.
    """
    from functools import reduce
    return reduce(xor, A)
def calc_onehash(shingle, seed):
    # One MinHash value: hash the packed *shingle*, mix it with *seed* via
    # XOR, and reduce modulo self.modulo.  (Python 2: uses long().)
    def c4_hash(shingle):
        # interpret the 4-byte shingle as a little-endian signed int, then
        # map it into the unsigned range [0, 2*(sys.maxsize+1))
        h = struct.unpack('<i',shingle)[0]
        return h % ((sys.maxsize + 1) * 2)
    if self.sh_type == 'c4':
        return operator.xor(c4_hash(shingle), long(seed)) % self.modulo
    else:
        # other shingle types use the generic positive hash helper
        return operator.xor(compute_positive_hash(shingle), long(seed)) % self.modulo
def validation_cycle(kernel, ngram, class_good_oases, class_good_trinity, reads_names, reads_seq, fileout_suffix):
    """
    Train one classifier per assembler (oases / trinity) on 75% of its
    "good" reads, then launch four classify() worker processes: the
    held-out 25% of each class ("must-fit") and 10000 reads outside each
    class ("must-not-fit").  Blocks until all four processes finish.
    """
    #print ("thread started")
    # 75/25 train/test split for both classes
    oases_training_set = class_good_oases[0:int(len(class_good_oases)*0.75)]
    trinity_training_set = class_good_trinity[0:int(len(class_good_trinity)*0.75)]
    oases_test_set = class_good_oases[int(len(class_good_oases)*0.75)+1:int(len(class_good_oases)-1)]
    trinity_test_set = class_good_trinity[int(len(class_good_trinity)*0.75)+1:int(len(class_good_trinity)-1)]
    (vectorizer_o, classifier_o) = train_classifier(oases_training_set, reads_seq, kernel, 2, ngram, kernel)
    (vectorizer_t, classifier_t) = train_classifier(trinity_training_set, reads_seq, kernel, 2, ngram, kernel)
    process_list = []
    # held-out oases reads should be accepted by the oases classifier
    p = multiprocessing.Process(target=classify, args=(vectorizer_o, classifier_o, 10000, "validation/oases_classified-" + str(ngram) + "-" + kernel.keys()[0] + "-" + str(kernel[kernel.keys()[0]]) + "-" + fileout_suffix + "-must-fit", reads_names, oases_test_set))
    process_list.append(p)
    p.start()
    # held-out trinity reads should be accepted by the trinity classifier
    p = multiprocessing.Process(target=classify, args=(vectorizer_t, classifier_t, 10000, "validation/trinity_classified-" + str(ngram) + "-" + kernel.keys()[0] + "-" + str(kernel[kernel.keys()[0]]) + "-" + fileout_suffix + "-must-fit", reads_names, trinity_test_set))
    process_list.append(p)
    p.start()
    # operator.xor on two sets is the symmetric difference: reads that are
    # NOT in the oases class -- these should be rejected
    test_list = list(operator.xor(set(reads_seq), set(class_good_oases)))
    p = multiprocessing.Process(target=classify, args=(vectorizer_o, classifier_o, 10000, "validation/oases_classified-" + str(ngram) + "-" + kernel.keys()[0] + "-" + str(kernel[kernel.keys()[0]]) + "-" + fileout_suffix + "-must-not-fit", reads_names, test_list[0:10000]))
    process_list.append(p)
    p.start()
    # likewise for reads outside the trinity class
    test_list = list(operator.xor(set(reads_seq), set(class_good_trinity)))
    p = multiprocessing.Process(target=classify, args=(vectorizer_t, classifier_t, 10000, "validation/trinity_classified-" + str(ngram) + "-" + kernel.keys()[0] + "-" + str(kernel[kernel.keys()[0]]) + "-" + fileout_suffix + "-must-not-fit", reads_names, test_list[0:10000]))
    process_list.append(p)
    p.start()
    # wait for all classification workers
    for p in process_list:
        p.join()
def eq_defined(self, otherword):
    """
    Return True if the current word is equal in definition with otherword.
    @return: equal or not.
    @rtype: True/False
    """
    # "not xor" is True exactly when both flags agree
    same_definition = not xor(self.is_defined(), otherword.is_defined())
    same_tanwin = not xor(self.is_tanwin(), otherword.is_tanwin())
    return same_definition and same_tanwin
def decrypt():
    # Decode the cipher returned by loadCipher() with the fixed key "god":
    # each value is XORed with the key letter at position i mod 3.
    # (Python 2: xrange.)
    # NOTE(review): loadCipher() is re-evaluated on every iteration --
    # hoisting it into a local would avoid the repeated loads.
    message = ''
    for i in xrange(len(loadCipher())):
        if i % 3 == 0:
            message += chr(xor(ord('g'), int(loadCipher()[i])))
        elif i % 3 == 1:
            message += chr(xor(ord('o'), int(loadCipher()[i])))
        else:
            message += chr(xor(ord('d'), int(loadCipher()[i])))
    return message
def Geffe(p1, p2, p3, s1, s2, s3, k):
    """
    Geffe generator: combine three LFSR keystreams of length *k* with the
    non-linear function f(x1, x2, x3) = x1*x2 XOR x2*x3 XOR x3.

    p1..p3 are the feedback polynomials, s1..s3 the seeds.  Returns the
    combined keystream as a list of bits.  (The original's loop variable
    shadowed the parameter *k*; the loop variables are renamed.)
    """
    stream1 = LFSR(p1, s1, k)
    stream2 = LFSR(p2, s2, k)
    stream3 = LFSR(p3, s3, k)
    combined = []
    for b1, b2, b3 in zip(stream1, stream2, stream3):
        x1 = b1 * b2
        x2 = b2 * b3
        x3 = b3
        combined.append(xor(xor(x1, x2), x3))
    return combined
def one_of_three(a, b, c):
    """
    Return True when exactly one of *a*, *b*, *c* is truthy.

    Truth table (a b c -> result):
      0 0 0 -> False      1 0 0 -> True
      0 0 1 -> True       1 0 1 -> False
      0 1 0 -> True       1 1 0 -> False
      0 1 1 -> False      1 1 1 -> False

    Counting the truthy values states the contract directly and always
    returns a bool, unlike the original parity trick
    (xor(xor(a, b), c) and not (a and b and c)) which could return 0.
    """
    return (bool(a) + bool(b) + bool(c)) == 1
def findKey():
    # Brute-force search: decode the cipher with every candidate 3-letter
    # key from buildKeys() and print any decryption containing common
    # English words.  (Python 2: xrange and print statement.)
    # NOTE(review): loadCipher() is re-evaluated per character -- hoisting
    # it would avoid repeated loads.
    for key in buildKeys():
        message = ''
        for i in xrange(len(loadCipher())):
            # the 3-byte key cycles over the cipher positions
            if i % 3 == 0:
                message += chr(xor(ord(key[0]), int(loadCipher()[i])))
            elif i % 3 == 1:
                message += chr(xor(ord(key[1]), int(loadCipher()[i])))
            else:
                message += chr(xor(ord(key[2]), int(loadCipher()[i])))
        # crude English-text heuristic
        if 'the ' in message.lower() and 'to ' in message.lower():
            print message, key
def decode(lists = None, dekey = (ord('s'),ord('b'),ord('w'))):
    """
    XOR-decode *lists* (a sequence of numeric strings or ints) with the
    repeating 3-byte key *dekey* and return the decoded characters as a
    list of 1-char strings.

    Returns False when *lists* is None (original contract preserved).
    Cleanups: the unreachable ``else`` branch (i % 3 is always 0, 1 or 2)
    and the redundant second int() conversion are removed.
    """
    if lists is None:
        return False
    return [chr(xor(int(value), dekey[i % 3])) for i, value in enumerate(lists)]
def hamming_gen(error_object):
    '''
    hamming_gen(error_object) -> String
    This Function Get An Error_object As Input and Return Modified String By Hamming Code Method As Output , Default Input Is Binary
    '''
    try:
        new_str=""                           # Empty String
        if what(error_object.str)=="bin":    # Condition For Checking Binary Or Hex (Default Is Bin)
            new_str=error_object.str
        elif what(error_object.str)=="hex":
            new_str=hex_2_bin(error_object.str)
        length=len(new_str)                  # Length Of Input String
        parity_number=int(log(length,2))+1   # Calc Number Of Needed Parity Code
        parity_index=[]                      # Empty List Of Parity Index
        message=[]                           # Empty List As Message
        # hard-coded bit-coverage tables for each parity bit (message
        # positions covered by parity bits P1, P2, P4, P8, P16); these
        # tables cap the supported codeword length at 20 bits
        p1=[0,2,4,6,8,10,12,14,16,18]        # Parity-1 Indexs
        p2=[1,2,5,6,8,9,13,14,17,18]         # Parity-2 Indexs
        p4=[3,4,5,6,11,12,13,14,19]          # Parity-4 Indexs
        p8=[7,8,9,10,11,12,13,14]            # Parity-8 Indexs
        p16=[15,16,17,18,19]                 # Parity-16 Indexs
        k=0                                  # Iteration Number
        parity_number_index=1                # Parity Number Index
        for i in range(length):              # Loop For Extract available parity index in input message and added to parity _index
            # parity bits live at positions 2**i - 1 (0-based)
            if int(2**i)<=length and parity_number_index<=parity_number:
                parity_index.append(int(2**i)-1)
                parity_number_index=parity_number_index+1
        # nubmer of total parity in string
        for i in range(length+len(parity_index)):   # Modified A Init List For Hamming Code Output
            if i in parity_index:            # By Inserting 0 in Parity Location
                message.append("0")
            else:
                message.append(new_str[k])   # And Insert Original Message Between them
                k=k+1                        # Iter
        for i in range(length+len(parity_index)):   # Calc Each Parity Bit In Message And Generate Hamming Output
            # each parity position accumulates the XOR of every covered bit
            if i in p1:
                message[0]=str(xor(int(message[0]),int(message[i])))
            if i in p2:
                message[1]=str(xor(int(message[1]),int(message[i])))
            if i in p4:
                message[3]=str(xor(int(message[3]),int(message[i])))
            if i in p8:
                message[7]=str(xor(int(message[7]),int(message[i])))
            if i in p16:
                message[15]=str(xor(int(message[15]),int(message[i])))
        result="".join(message)              # Use Join Method to Convert List To String
        return result
    except:
        # NOTE(review): bare except hides the real failure cause
        print("Something Wrong In Generating Hamming Code")
        return None
def run_now(modeladmin, request, queryset):
    """
    Django admin action: run each selected collector, optionally over a
    [start_date, end_date] window from the POSTed form.

    Both dates must be supplied together (or neither), and each must parse
    as YYYY-MM-DD.  Fixes vs. the original: validation failures now return
    early instead of reporting the error and running the collectors anyway,
    and the mismatch message's grammar is corrected.
    """
    start_at = request.POST['start_date']
    end_at = request.POST['end_date']
    # exactly one of the two dates given -> invalid
    if xor(bool(start_at), bool(end_at)):
        message = "You must either specify both a start date and an end " \
                  "date for the collector run, or neither"
        modeladmin.message_user(request, message, messages.ERROR)
        return
    if start_at and end_at:
        try:
            datetime.strptime(start_at, '%Y-%m-%d')
            datetime.strptime(end_at, '%Y-%m-%d')
        except ValueError:
            message = "Incorrect date format, should be YYYY-MM-DD"
            modeladmin.message_user(request, message, messages.ERROR)
            return
    for collector in queryset:
        try:
            run_collector.delay(
                collector.slug, start_at=start_at, end_at=end_at)
        except SystemExit:
            # realtime collectors bail out with SystemExit when backfilled
            message = "An exception has occurred. " \
                      "Please check you are not trying to backfill a " \
                      "realtime collector"
            modeladmin.message_user(request, message, messages.ERROR)
def crc_gen(error_object,poly):
    '''
    crc_gen(error_object , str) -> String
    This Function Get An Error_object And Polynomial Coef As Input And Return Modified String By CRC Method And That Poly As Output
    '''
    try:
        new_str=""                                    # Empty String
        if what(error_object.str)=="bin":             # Condition For Checking Binary Or Hex (Default Is Bin)
            new_str=error_object.str
        elif what(error_object.str)=="hex":
            new_str=hex_2_bin(error_object.str)
        divider_len=len(poly)                         # Length Of Divider (Length Of Polynomial)
        extra_bit_len=divider_len-1                   # Extra Bits = Polynomial Degree
        message=new_str+(extra_bit_len)*"0"           # Add Zero At The End Of The Orignal Message
        length=len(message)                           # Length Of Modified Message
        length_init=len(new_str)                      # length Of Original Message
        message=list(message)                         # Convert Message String tO lIST
        start_index=0                                 # start_index variable default zero
        cond_list=["0"]*length_init                   # Generate Stop Condition Of CRC Method
        # binary polynomial long division in place: keep XORing the divisor
        # at the leading 1 until the data portion is all zeros
        while(message[0:length-(extra_bit_len)]!=cond_list ):   # Main Codition For Continue Divide
            temp=message[start_index:start_index+divider_len]   # Subset Message By Divider Length
            for i in range(divider_len):              # XOR Subset By Divider And Replace In Message
                temp[i]=str(xor(int(temp[i]),int(poly[i])))
            message[start_index:start_index+divider_len]=temp
            for j in range(length):                   # Find Next Bit 1 And Update strat_index
                if message[j]=="1":
                    start_index=j
                    break
        result="".join(message[length-(extra_bit_len):])   # Convert List To String
        # codeword = original message + CRC remainder
        return new_str+result                         # Return Modified String
    except:
        # NOTE(review): bare except hides the real failure cause
        print("Something Wrong In Generating Error_Detection Object!!")
        return None
def crc_det(error_object, poly):
    '''
    crc_det(error_object) -> Boolean

    Check a received codeword (error_object.str) against the CRC
    polynomial *poly*: True when the remainder of the binary polynomial
    division is zero, False otherwise, None on any internal failure.
    '''
    try:
        codeword = list(error_object.str)    # received bits as a mutable list
        divider_len = len(poly)
        extra_bits = divider_len - 1         # degree of the polynomial
        total = len(codeword)
        cursor = 0                           # position of the current leading 1
        zero_prefix = ["0"] * (total - extra_bits)
        # keep XORing the divisor at the leading 1 until the data part is zero
        while codeword[0:total - extra_bits] != zero_prefix:
            window = codeword[cursor:cursor + divider_len]
            for i in range(divider_len):
                window[i] = str(xor(int(window[i]), int(poly[i])))
            codeword[cursor:cursor + divider_len] = window
            for j in range(total):
                if codeword[j] == "1":
                    cursor = j
                    break
        # valid iff the remainder (last extra_bits positions) is all zeros
        return "".join(codeword[total - extra_bits:]) == (divider_len - 1) * "0"
    except:
        print("Something Wrong In Detecting Error In Object")
        return None
def xorKey(num, key):
    """
    XOR each byte of *key* with *num* (an int < 256) and return the
    resulting string.

    Uses "".join over a generator instead of the original quadratic
    string concatenation loop.
    """
    from operator import xor
    return "".join(chr(xor(num, ord(ch))) for ch in key)
def encrypt_old(self, passw, string):
    # Legacy encrypt: XOR the password with the payload, then base64 the
    # joined result.
    # NOTE(review): operator.xor raises TypeError on two str objects, so
    # `xor` here must be a project helper that XORs strings element-wise
    # and returns an iterable of characters -- confirm which xor is in
    # scope.  Any failure is silently mapped to "".
    try:
        ret = xor(passw, string)
        s = base64.b64encode("".join(ret))
    except:
        s = ""
    return s
def makeURL(self):
    """
    Build the absolute request URL (self._reqURL) from the raw request
    URL and connection parameters, and decide whether the request should
    be routed through a proxy (self._proxyIp).
    """
    url_data = URI.fromBytes(self._url)
    if url_data.scheme:
        # absolute-form request URL: take everything from the URL itself
        args = {
            "scheme": url_data.scheme,
            "hostname": url_data.host,
            "port": url_data.port,
            "path": url_data.path,
        }
    else:
        # origin-form: reconstruct from the connection's host/port/TLS state
        args = {
            "scheme": "https" if self._ssl else "http",
            "hostname": self._hostname,
            "port": self._port,
            "path": self._url,
        }
    hasHost = bool(url_data.host)
    hostMatch = url_data.host.endswith(self._hostname)
    ipMatch = self._ipAddr in self._hostnameIp
    # port agrees with the scheme (443 for TLS, 80 otherwise)
    portProtoMatch = self._ssl and int(args['port']) == 443 or \
        not self._ssl and int(args['port']) == 80
    # proxy when host and IP evidence disagree, or the IP isn't ours at all
    if hasHost and xor(hostMatch, ipMatch) or not ipMatch:
        self._proxyIp = self._ipAddr
    # Remove port if default (see RFC 2616, 14.23)
    if (int(args['port']) in (80, 443) and portProtoMatch) or \
            bool(self._proxyIp) and not url_data.scheme:
        self._reqURL = "{scheme}://{hostname}{path}".format(**args)
    else:
        self._reqURL = "{scheme}://{hostname}:{port}{path}".format(**args)
    log.debug(
        "HTTP request URL: %s, Proxy: %s", self._reqURL, self._proxyIp
    )
def LFSR(coef, sem, longitud):
    """
    Linear feedback shift register.

    coef     -- feedback polynomial coefficients (0/1), same length as the seed
    sem      -- seed / initial state bits
    longitud -- number of output bits to generate

    Returns (bit string, bit list) of the emitted bits.  Modernised:
    uses range instead of the Python-2-only xrange.
    """
    assert len(coef) == len(sem)
    out_str = ""
    out_bits = []
    for _ in range(longitud):
        # feedback bit: XOR (parity) of the state bits selected by coef
        feedback = 0
        for c, s in zip(coef, sem):
            feedback = xor(feedback, s * c)
        # emit the last state bit, then shift the register
        out_str += str(sem[-1])
        out_bits.append(int(sem[-1]))
        sem = [feedback] + sem[:-1]
    return out_str, out_bits
def decrypt_old(self, passw, string):
    # Legacy decrypt (inverse of encrypt_old): base64-decode, then XOR
    # with the password and join the characters.
    # NOTE(review): as in encrypt_old, `xor` must be a project helper that
    # XORs strings element-wise -- operator.xor would raise TypeError on
    # str operands.  Any failure is silently mapped to "".
    try:
        ret = xor(passw, base64.b64decode(string))
        s = "".join(ret)
    except:
        s = ""
    return s
def NLFSR(f, s, k):
    """
    Non-linear feedback shift register.

    f -- list of monomials, each a 0/1 exponent vector over the state bits
    s -- initial state bits
    k -- number of output bits to generate

    Returns (bit string, bit list) of the generated feedback bits.
    """
    assert len(f[0]) == len(s)
    bits_str = ""
    bits = []
    for _ in range(k):
        feedback = 0
        for monomial in f:
            # evaluate one monomial over the state; the accumulator starts
            # at 1 and is reduced mod 2 after every term, exactly matching
            # the original arithmetic
            term = 1
            for exponent, state_bit in zip(monomial, s):
                term += state_bit * exponent
                term = term % 2
            feedback = xor(feedback, term)
        bits_str += str(feedback)
        bits.append(feedback)
        # shift: new bit enters at the front, oldest state bit drops off
        s = [feedback] + s[:-1]
    return bits_str, bits
# --- operator-module demo (continues from above: `li` is a list built earlier) ---
# in-place sequence mutation: delete elements at indices 2..3
operator.delitem(li,slice(2, 4))
print("\nthe modified list after delitem() is : ",end="")
for i in range(0,len(li)):
    print(li[i],end=" ")
# slice-based read access
print("\nthe 1st and 2nd element of list is : ",end=" ")
print(operator.getitem(li,slice(0, 2)))
#
# string helpers: concatenation and substring membership
s1 = "geeksfor"
s2 = "geeks"
print("\nthe concatenated string is : ",end="")
print(operator.concat(s1, s2))
if(operator.contains(s1, s2)):
    print("geeksfor contain geeks")
else:
    print("geeksfor does not contain geeks")
#bitwise
a = 3
b= 4
print("\nthe bitwise and of a and b is : ",end="")
print(operator.and_(a, b))
print("the bitwise or of a and b is : ",end="")
print(operator.or_(a, b))
print("the bitwise xor of a and b is : ",end=" ")
print(operator.xor(a, b))
# invert is bitwise NOT: ~3 == -4
print("the inverted value of a is : ",end="")
print(operator.invert(a))
def split_number(numbers):
    # "2-9" -> ["2", "9"]
    arrnum = numbers.split("-")
    return arrnum

def poscontains(position, string, char):
    # True when *char* sits at index int(position) of *string*.
    # NOTE(review): the policy positions are 1-based, but *string* here is
    # split[1], which still carries the leading space after ':' -- that
    # off-by-one makes the 1-based indexing line up by accident.
    if char == string[int(position)]:
        return True
    else:
        return False

# Advent of Code 2020 day 2 (part 2): count passwords where exactly one of
# the two policy positions holds the policy letter.
with open('input_day2.txt', newline='') as file:
    lines = file.readlines()
    validpassword = []
    for line in lines:
        # line format: "<lo>-<hi> <letter>: <password>"
        split = line.split(":")
        rules = split[0].split(" ")
        numbers = rules[0]
        letter = rules[1]
        count = int(split[1].count(letter))  # only used by the part-1 check below
        highlow = split_number(numbers)
        #if (count <= int(highlow[1])) & (count >= int(highlow[0])):
        #    print(count)
        #    validpassword.append(split[1])
        # part 2: exactly one position must match -> xor
        if xor(poscontains(highlow[0], split[1], letter), poscontains(highlow[1], split[1], letter)):
            validpassword.append(split[1])
    print(len(validpassword))
print(tup[0]) # tuples are immutable # tup[0] = 99 tup2 = (tup, "more", 123) print(tup2) print(len(tup2)) names.extend(tup2) print(names) n2 = {"Fred": "Jones", "Alice": "Smith"} print(n2) print(type(n2)) print(n2["Fred"]) print("---------------") print(n2.keys()) print("---------------") print(n2.values()) print("---------------") print(n2.items()) print(True and True) # logical and/or should use the English word.. & | are bitwise print(True and False) print(True or False) # xor comes from library called operator from operator import xor print(xor(True, False)) print(xor(True, True))
def testOperators(self):
    """Exercise Python operator overloads on tf.Variable -- arithmetic
    (forward and reflected), comparisons, unary ops, modulo, bitwise ops,
    slicing, and matmul -- and check each result against its expected
    value."""
    with self.cached_session():
        var_f = variables.Variable([2.0])
        # arithmetic: forward and reflected forms
        add = var_f + 0.0
        radd = 1.0 + var_f
        sub = var_f - 1.0
        rsub = 1.0 - var_f
        mul = var_f * 10.0
        rmul = 10.0 * var_f
        div = var_f / 10.0
        rdiv = 10.0 / var_f
        # comparisons: forward and reflected forms
        lt = var_f < 3.0
        rlt = 3.0 < var_f
        le = var_f <= 2.0
        rle = 2.0 <= var_f
        gt = var_f > 3.0
        rgt = 3.0 > var_f
        ge = var_f >= 2.0
        rge = 2.0 >= var_f
        # unary ops
        neg = -var_f
        abs_v = abs(var_f)
        # integer modulo
        var_i = variables.Variable([20])
        mod = var_i % 7
        rmod = 103 % var_i
        # boolean/bitwise ops via the operator module
        var_b = variables.Variable([True, False])
        and_v = operator.and_(var_b, [True, True])
        or_v = operator.or_(var_b, [False, True])
        xor_v = operator.xor(var_b, [False, False])
        invert_v = ~var_b
        # slicing (empty slice of a random tensor)
        rnd = np.random.rand(4, 4).astype("f")
        var_t = variables.Variable(rnd)
        slice_v = var_t[2, 0:0]
        # matrix multiplication: forward and reflected
        var_m = variables.Variable([[2.0, 3.0]])
        matmul = var_m.__matmul__([[10.0], [20.0]])
        rmatmul = var_m.__rmatmul__([[10.0], [20.0]])
        self.evaluate(variables.global_variables_initializer())
        # expected values for everything built above
        self.assertAllClose([2.0], self.evaluate(add))
        self.assertAllClose([3.0], self.evaluate(radd))
        self.assertAllClose([1.0], self.evaluate(sub))
        self.assertAllClose([-1.0], self.evaluate(rsub))
        self.assertAllClose([20.0], self.evaluate(mul))
        self.assertAllClose([20.0], self.evaluate(rmul))
        self.assertAllClose([0.2], self.evaluate(div))
        self.assertAllClose([5.0], self.evaluate(rdiv))
        self.assertAllClose([-2.0], self.evaluate(neg))
        self.assertAllClose([2.0], self.evaluate(abs_v))
        self.assertAllClose([True], self.evaluate(lt))
        self.assertAllClose([False], self.evaluate(rlt))
        self.assertAllClose([True], self.evaluate(le))
        self.assertAllClose([True], self.evaluate(rle))
        self.assertAllClose([False], self.evaluate(gt))
        self.assertAllClose([True], self.evaluate(rgt))
        self.assertAllClose([True], self.evaluate(ge))
        self.assertAllClose([True], self.evaluate(rge))
        self.assertAllClose([6], self.evaluate(mod))
        self.assertAllClose([3], self.evaluate(rmod))
        self.assertAllClose([True, False], self.evaluate(and_v))
        self.assertAllClose([True, True], self.evaluate(or_v))
        self.assertAllClose([True, False], self.evaluate(xor_v))
        self.assertAllClose([False, True], self.evaluate(invert_v))
        self.assertAllClose(rnd[2, 0:0], self.evaluate(slice_v))
        self.assertAllClose([[80.0]], self.evaluate(matmul))
        self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], self.evaluate(rmatmul))
def test_bitwise_xor(self):
    """operator.xor requires exactly two arguments and XORs integers."""
    self.assertRaises(TypeError, operator.xor)
    self.assertRaises(TypeError, operator.xor, None, None)
    # assertEqual reports both values on failure, unlike assertTrue(a == b)
    self.assertEqual(operator.xor(0xb, 0xc), 0x7)
def styled_fig_ax(size='wide', font_size=10.0, zero_lines=True, y_axis_grid=True, seaborn=False, subplots_rows=None, subplots_columns=None, subplots_kwargs=None, x_formatter=None, y_formatter=None, other_rc_params=None):
    """
    Context manager for a styled axis.

    :param size: accepts 'wide' (top/bottom of slide), 'tall' (left side of
        slide), 'tallest' (left side of slide, lots of text), plus
        'quarter', 'square' and 'custom'
    :param font_size: the main font size; some sizes are set relative to
        this; clamped to 6.0 for size='tallest'
    :param zero_lines: strong lines at x=0 and y=0
    :param y_axis_grid: draw horizontal grid lines
    :param seaborn: style through seaborn instead of a plain rc_context
    :param subplots_rows: the number of rows in the subplot (default=None)
    :param subplots_columns: the number of columns in the subplot (default=None)
    :param subplots_kwargs: dict of kwargs for subplots call (e.g., sharex=True)
    :param x_formatter: Formatter to use for major ticks on x axis
    :param y_formatter: Formatter to use for major ticks on y axis
    :param other_rc_params: any custom rcParams to include

    Fix vs. the original: the tallest-specific branch compared the *sizes
    dict* to the string 'tallest' (always False), so the padding/font
    clamp never ran; it now tests the *size* argument.  The duplicated
    savefig.pad_inches assignment is removed.
    """
    subplots_kwargs = {} if subplots_kwargs is None else subplots_kwargs
    other_rc_params = {} if other_rc_params is None else other_rc_params
    sizes = {
        'quarter': (6.8, 3.7),
        'wide': (9.75, 3.6),
        'tall': (6., 7.5),
        'tallest': (6, 8.5),
        'square': (6., 6.),
        'custom': other_rc_params.get('figure.figsize', None),
    }
    figure_size = sizes[size]
    original_params = plt.rcParams.copy()
    if size == 'tallest':  # fixed: was `sizes == 'tallest'`, which is never True
        plt.rcParams['savefig.pad_inches'] = 0.0
        if font_size > 6.0:
            font_size = 6.0
    # set globally here since this seems ignored by rc_context.....
    # apologies for side effects.
    plt.rcParams['axes.titlesize'] = 1.25 * font_size
    plt.rcParams['figure.dpi'] = 196
    plt.rcParams['savefig.dpi'] = 196
    plt.rcParams['legend.frameon'] = False
    plt.rcParams['legend.fontsize'] = 0.8 * font_size
    rc_params = {
        'figure.figsize': figure_size,
        # font
        'font.family': 'Verdana',
        'font.size': font_size,
        'axes.labelsize': font_size,
        'xtick.labelsize': 0.8 * font_size,
        'ytick.labelsize': 0.8 * font_size,
        # remove extras
        'xtick.major.size': 0,  # major tick size in points
        'xtick.minor.size': 0,  # minor tick size in points
        'ytick.major.size': 0,  # major tick size in points
        'ytick.minor.size': 0,  # minor tick size in points
        # colors
        'axes.prop_cycle': cycler('color', get_palette(dict=False, hex=True)),
        # grid
        'axes.facecolor': 'white',
        'axes.edgecolor': '.8',
        'axes.grid': False,
        'grid.linestyle': '-',
        'grid.linewidth': 0.25,
        'grid.color': '#a3a3a3',
        'axes.linewidth': 0.0,
    }
    rc_params.update(other_rc_params)

    def _adjust_figure_inplace(fig, ax):
        # set just x axis grid lines
        ax.grid(True)
        ax.xaxis.grid(False)
        if not y_axis_grid:
            ax.yaxis.grid(False)
        ax.set_axisbelow(True)
        if zero_lines:
            ax.axvline(x=0, c='k', linestyle='-', lw=0.7, alpha=0.5)
            ax.axhline(y=0, c='k', linestyle='-', lw=0.7, alpha=0.5)
        if x_formatter:
            ax.xaxis.set_major_formatter(x_formatter)
        if y_formatter:
            ax.yaxis.set_major_formatter(y_formatter)
        fig.tight_layout()

    if seaborn:
        with sns.axes_style(rc=rc_params):
            sns.set_palette(get_palette())
            yield
            fig = plt.gcf()
            fig.set_size_inches(figure_size)
            # hack for joint plots (looks like main plot is always first)
            ax = plt.gcf().get_axes()[0]
            _adjust_figure_inplace(fig, ax)
    else:
        with plt.rc_context(rc_params):
            if xor((subplots_rows is not None), (subplots_columns is not None)):
                raise ValueError(
                    "Must pass both subplots_rows and subplots_columns or neither."
                )
            if subplots_rows is not None:
                fig, axes = plt.subplots(subplots_rows, subplots_columns, figsize=figure_size, **subplots_kwargs)
                # NOTE(review): axes is iterated flat; a rows x cols grid
                # (both > 1) would need axes.flat here
                for ax in axes:
                    _adjust_figure_inplace(fig, ax)
                yield axes
            else:
                fig, ax = plt.subplots(figsize=figure_size)
                _adjust_figure_inplace(fig, ax)
                yield ax
    # reset after context manager is closed
    for k, v in original_params.items():
        plt.rcParams[k] = v
def xnor(*args):
    """Logical complement of operator.xor over *args* (True when inputs agree)."""
    parity = xor(*args)
    return not_(parity)
def test_bitwise_xor(self):
    """operator.xor requires exactly two arguments; 0xb ^ 0xc == 0x7."""
    # failUnless*/failIf* are deprecated aliases (since Python 2.7) --
    # use the modern assert* spellings
    self.assertRaises(TypeError, operator.xor)
    self.assertRaises(TypeError, operator.xor, None, None)
    self.assertEqual(operator.xor(0xb, 0xc), 0x7)
# Compute Q = D ^ C ^ P for two (ciphertext, ciphertext, plaintext) triples
# entered as hex -- presumably a two-time-pad / stream-cipher reuse
# exercise (verify intent against the accompanying material).
from operator import xor
C1 = int(input("What is C1? "), 16)
C2 = int(input("What is C2? "), 16)
D1 = int(input("What is D1? "), 16)
D2 = int(input("What is D2? "), 16)
P1 = int(input("What is P1? "), 16)
P2 = int(input("What is P2? "), 16)
# associative XOR chain: Q = D ^ C ^ P
Q1 = xor(xor(D1, C1), P1)
Q2 = xor(xor(D2, C2), P2)
print("Q1 = ", hex(Q1))
print("Q2 = ", hex(Q2))
def diff3(yourtext, origtext, theirtext):
    """Three-way diff based on the GNU diff3.c by R. Smith.
    @param [in] yourtext Array of lines of your text.
    @param [in] origtext Array of lines of original text.
    @param [in] theirtext Array of lines of their text.
    @returns Array of tuples containing diff results. The tuples consist of
    (cmd, loA, hiA, loB, hiB), where cmd is either one of '0', '1', '2', or 'A'.
    """
    # diff result => [(cmd, loA, hiA, loB, hiB), ...]
    d2 = (diff(origtext, yourtext), diff(origtext, theirtext))
    d3 = []
    # r3 holds the previous hunk bookkeeping; NOTE(review): it is never
    # reassigned in this function, so the else-branches below always see
    # its initial zeros
    r3 = [None, 0, 0, 0, 0, 0, 0]
    while d2[0] or d2[1]:
        # find a continual range in origtext lo2..hi2
        # changed by yourtext or by theirtext.
        #
        #     d2[0]     222    222222222
        #  origtext  ...L!!!!!!!!!!!!!!!!!!!!H...
        #     d2[1]       222222   22  2222222
        r2 = ([], [])
        # pick the side (0=yours, 1=theirs) whose next hunk starts first
        if not d2[0]:
            i = 1
        else:
            if not d2[1]:
                i = 0
            else:
                if d2[0][0][1] <= d2[1][0][1]:
                    i = 0
                else:
                    i = 1
        j = i
        k = xor(i, 1)  # xor with 1 toggles between the two sides
        hi = d2[j][0][2]
        r2[j].append(d2[j].pop(0))
        # absorb hunks from the other side while they overlap or touch
        while d2[k] and d2[k][0][1] <= hi + 1:
            hi_k = d2[k][0][2]
            r2[k].append(d2[k].pop(0))
            if hi < hi_k:
                hi = hi_k
            j = k
            k = xor(k, 1)
        lo2 = r2[i][0][1]
        hi2 = r2[j][-1][2]
        # take the corresponding ranges in yourtext lo0..hi0
        # and in theirtext lo1..hi1.
        #
        #   yourtext     ..L!!!!!!!!!!!!!!!!!!!!!!!!!!!!H...
        #      d2[0]       222    222222222
        #   origtext    ...00!1111!000!!00!111111...
        #      d2[1]         222222   22  2222222
        #  theirtext          ...L!!!!!!!!!!!!!!!!H...
        if r2[0]:
            lo0 = r2[0][0][3] - r2[0][0][1] + lo2
            hi0 = r2[0][-1][4] - r2[0][-1][2] + hi2
        else:
            lo0 = r3[2] - r3[6] + lo2
            hi0 = r3[2] - r3[6] + hi2
        if r2[1]:
            lo1 = r2[1][0][3] - r2[1][0][1] + lo2
            hi1 = r2[1][-1][4] - r2[1][-1][2] + hi2
        else:
            lo1 = r3[4] - r3[6] + lo2
            hi1 = r3[4] - r3[6] + hi2
        # detect type of changes: '0' yours only, '1' theirs only,
        # '2' both sides identical, 'A' conflict
        if not r2[0]:
            cmd = '1'
        elif not r2[1]:
            cmd = '0'
        elif hi0 - lo0 != hi1 - lo1:
            cmd = 'A'
        else:
            cmd = '2'
            # same length on both sides: downgrade to conflict if any
            # corresponding lines differ (or one index is out of range)
            for d in range(0, hi0 - lo0 + 1):
                (i0, i1) = (lo0 + d - 1, lo1 + d - 1)
                ok0 = (0 <= i0 and i0 < len(yourtext))
                ok1 = (0 <= i1 and i1 < len(theirtext))
                if xor(ok0, ok1) or (ok0 and yourtext[i0] != theirtext[i1]):
                    cmd = 'A'
                    break
        d3.append((cmd, lo0, hi0, lo1, hi1, lo2, hi2))
    return d3
def _xor(a: bool, b: bool) -> Tuple[bool]: return op.xor(a, b),
def bitwise_xor(a, b, **_):
    """XOR *a* and *b*; if that operand order raises TypeError, retry swapped.

    Extra keyword arguments are accepted and ignored.
    """
    try:
        return a ^ b
    except TypeError:
        # one operand may only implement the reflected protocol
        return b ^ a
def test_env(env, monkeypatch):
    """Compare pretend-submission output for *env* against stored reference scripts.

    For every reference job imported from the archive, submits it (pretend
    mode, output captured) and asserts the generated script text matches the
    reference ``script_*.sh`` stored alongside the job.
    """
    monkeypatch.setattr(flow.FlowProject, "_store_bundled", gen._store_bundled)
    # We need to set the scheduler manually. The FakeScheduler is used for two
    # reasons. First, the FakeScheduler prints scripts to screen on submission
    # and we can capture that output. Second, the FakeScheduler won't try to
    # call any cluster executable (e.g. squeue) associated with the real
    # schedulers used on supported clusters. Otherwise submission would fail
    # when attempting to determine what jobs already exist on the scheduler.
    monkeypatch.setattr(env, "scheduler_type", FakeScheduler)
    # Force asserts to show the full file when failures occur.
    # Useful to debug errors that arise.

    # Must import the data into the project.
    with signac.TemporaryProject(name=gen.PROJECT_NAME) as p:
        with gen.get_masked_flowproject(p, environment=env) as fp:
            # Here we set the appropriate executable for all the operations. This
            # is necessary as otherwise the default executable between submitting
            # and running could look different depending on the environment.
            for group in fp.groups.values():
                for op_key in group.operations:
                    if op_key in group.operation_directives:
                        monkeypatch.setitem(
                            group.operation_directives[op_key],
                            "executable",
                            gen.MOCK_EXECUTABLE,
                        )
            fp.import_from(origin=gen.ARCHIVE_DIR)
            jobs = fp.find_jobs(dict(environment=_env_name(env)))
            if not len(jobs):
                raise RuntimeError(
                    f"No reference data for environment {_env_name(env)}!")
            reference = []
            generated = []
            for job in jobs:
                parameters = job.sp.parameters()
                if "bundle" in parameters:
                    # Bundled submission: all named operations in one script.
                    bundle = parameters.pop("bundle")
                    tmp_out = io.TextIOWrapper(io.BytesIO(), sys.stdout.encoding)
                    # Silence stderr; capture stdout (the pretend script text).
                    with open(os.devnull, "w") as devnull:
                        with redirect_stderr(devnull):
                            with redirect_stdout(tmp_out):
                                fp.submit(
                                    jobs=[job],
                                    names=bundle,
                                    pretend=True,
                                    force=True,
                                    bundle_size=len(bundle),
                                    **parameters,
                                )
                    tmp_out.seek(0)
                    msg = f"---------- Bundled submission of job {job}"
                    generated.extend([msg] + tmp_out.read().splitlines())
                    with open(job.fn("script_{}.sh".format(
                            "_".join(bundle)))) as file:
                        reference.extend([msg] + file.read().splitlines())
                else:
                    # Unbundled: submit each operation/group separately.
                    for op in {**fp.operations, **fp.groups}:
                        if "partition" in parameters:
                            # Don't try to submit GPU operations to CPU partitions
                            # and vice versa. We should be able to relax this
                            # requirement if we make our error checking more
                            # consistent.
                            if operator.xor(
                                "gpu" in parameters["partition"].lower(),
                                "gpu" in op.lower(),
                            ):
                                continue
                        tmp_out = io.TextIOWrapper(io.BytesIO(), sys.stdout.encoding)
                        with open(os.devnull, "w") as devnull:
                            with redirect_stderr(devnull):
                                with redirect_stdout(tmp_out):
                                    fp.submit(
                                        jobs=[job],
                                        names=[op],
                                        pretend=True,
                                        force=True,
                                        **parameters,
                                    )
                        tmp_out.seek(0)
                        msg = f"---------- Submission of operation {op} for job {job}."
                        generated.extend([msg] + tmp_out.read().splitlines())
                        with open(job.fn(f"script_{op}.sh")) as file:
                            reference.extend([msg] + file.read().splitlines())
            assert "\n".join(generated) == "\n".join(reference)
sum+=i#실행문 i+=1#증감식 print('1부터 10까지 누적합',sum,sep=":") ''' 문]1부터 1000까지 숫자중 3의 배수 이거나 5의 배수인 숫자의 합을 구해라 단, 3과5의 공배수인 경우 제외(while문 사용) ''' import operator sum =0 i = 1 while i <=1000: #operator모듈의 xor()함수 사용 if operator.xor(i % 3 == 0,i % 5 == 0): sum += i # ^ 비튼 연산자 사용용 #if (i % 3 == 0) ^ (i % 5 == 0): # sum+=i i+=1 print('1부터 1000까지 3과 5의 배수의 합(공배수 제외):',sum,sep='') #반복할 횟수가 정해지지 않은 경우 while문 사용 print('[반복 횟수를 모를때]') import random i = 1 while i != 5: i = random.randint(1,10)#1부터 10까지 난수 발생 print('i는',i) ''' * 1 0 0 0
def check_net_id_and_subnet_id(body):
    """Reject requests that supply only one of the two neutron IDs.

    Callers must pass both ``neutron_net_id`` and ``neutron_subnet_id``
    or neither; exactly one present is a 400 error.
    """
    has_net_id = 'neutron_net_id' in body
    has_subnet_id = 'neutron_subnet_id' in body
    if xor(has_net_id, has_subnet_id):
        msg = _("When creating a new share network subnet you need to "
                "specify both neutron_net_id and neutron_subnet_id or "
                "none of them.")
        raise webob.exc.HTTPBadRequest(explanation=msg)
from operator import xor


def qianzhui(s1, s2):
    """Return the length of the longest common prefix of *s1* and *s2*.

    The original implementation materialized every prefix of *s1* into a
    list and probed it with an O(n) membership test per prefix of *s2*
    (O(n^2) time and space overall). A prefix of *s2* of length k can only
    match the unique length-k prefix of *s1*, so a single character-wise
    scan is equivalent and runs in O(min(len(s1), len(s2))).
    """
    length = 0
    for ch1, ch2 in zip(s1, s2):
        if ch1 != ch2:
            break
        length += 1
    return length


# Script: read n, a string, and n weights; maximize (common-prefix length
# of two suffixes) + xor(weight_i, weight_j).
n = int(input())
str1 = str(input())
Wilist = list(map(int, input().split(" ")))
res = 0
# NOTE(review): both ranges stop at n - 1, so the last suffix/weight is
# never considered — preserved from the original; confirm against the
# problem statement.
for i in range(0, n - 1):
    for j in range(i, n - 1):
        s1 = str1[i:]
        s2 = str1[j:]
        res = max(res, qianzhui(s1, s2) + xor(Wilist[i], Wilist[j]))
print(res)
def toboggan_password(first, second, character, password):
    """Return True iff *character* occupies exactly one of the two
    1-based positions *first* and *second* in *password*.

    The original expression ``(a or b) and xor(a, b)`` was redundant:
    ``xor`` of two booleans is already False when both are False, so
    "exactly one match" reduces to boolean inequality.
    """
    first_match = password[first - 1] == character
    second_match = password[second - 1] == character
    return first_match != second_match
class TVMScriptParser(Transformer):
    """Synr AST visitor pass which finally lowers to TIR.

    Notes for Extension
    -------------------
    1. To support a new type of AST node, add a function transform_xxx().
    2. To support new functions, add the function to the appropriate registry:
        We divide allowed function calls in TVM script into 3 categories,
        intrin, scope_handler and special_stmt.

        1. intrin functions are low level functions like mod, load, and
           constants. They correspond to a tir `IRNode`. They must have a
           return value. The user can register intrin functions for the
           parser to use.
        2. scope_handler functions have no return value. They take two
           arguments: the parser and the AST node. scope_handler functions
           are used in with and for statements.
        3. special_stmt functions handle cases that do not have a
           corresponding tir `IRNode`. These functions take the parser and
           the AST node as arguments and may return a value.

    When visiting a Call node, we check the special_stmt registry first. If
    no registered function is found, we then check the intrin registry.
    When visiting With node, we check the with_scope registry.
    When visiting For node, we check the for_scope registry.
    """

    # Mapping from synr binary ops to TIR node constructors. The lambdas for
    # the bitwise ops discard the span because the plain Python operators
    # take no span argument.
    _binop_maker = {
        ast.BuiltinOp.Add: tvm.tir.Add,
        ast.BuiltinOp.Sub: tvm.tir.Sub,
        ast.BuiltinOp.Mul: tvm.tir.Mul,
        ast.BuiltinOp.Div: tvm.tir.Div,
        ast.BuiltinOp.FloorDiv: tvm.tir.FloorDiv,
        ast.BuiltinOp.Mod: tvm.tir.FloorMod,
        ast.BuiltinOp.BitOr: lambda lhs, rhs, span: operator.or_(lhs, rhs),
        ast.BuiltinOp.BitAnd: lambda lhs, rhs, span: operator.and_(lhs, rhs),
        ast.BuiltinOp.BitXor: lambda lhs, rhs, span: operator.xor(lhs, rhs),
        ast.BuiltinOp.GT: tvm.tir.GT,
        ast.BuiltinOp.GE: tvm.tir.GE,
        ast.BuiltinOp.LT: tvm.tir.LT,
        ast.BuiltinOp.LE: tvm.tir.LE,
        ast.BuiltinOp.Eq: tvm.tir.EQ,
        ast.BuiltinOp.NotEq: tvm.tir.NE,
        ast.BuiltinOp.And: tvm.tir.And,
        ast.BuiltinOp.Or: tvm.tir.Or,
    }

    # Mapping from synr unary ops to TIR node constructors.
    _unaryop_maker = {
        ast.BuiltinOp.USub: lambda rhs, span: operator.neg(rhs),
        ast.BuiltinOp.Invert: lambda rhs, span: operator.invert(rhs),
        ast.BuiltinOp.Not: tvm.tir.Not,
    }

    def __init__(self, base_lienno, tir_namespace):
        # NOTE: the misspelled parameter name "base_lienno" is kept to
        # preserve the public signature for keyword callers.
        self.context = None
        self.base_lineno = base_lienno
        self.current_lineno = 0
        self.current_col_offset = 0
        self.tir_namespace = tir_namespace
        self.meta = None

    def init_function_parsing_env(self):
        """Initialize function parsing environment"""
        self.context = ContextMaintainer(self.report_error)  # scope emitter

    def init_meta(self, meta_dict):
        # Load serialized meta information (round-tripped through JSON).
        if meta_dict is not None:
            self.meta = tvm.ir.load_json(json.dumps(meta_dict))

    def transform(self, node):
        """Generic transformation for visiting the AST.

        Dispatches to `transform_ClassName` for the appropriate ClassName."""
        old_lineno, old_col_offset = self.current_lineno, self.current_col_offset

        if hasattr(node, "lineno"):
            self.current_lineno = self.base_lineno + node.lineno - 1
        if hasattr(node, "col_offset"):
            self.current_col_offset = node.col_offset

        method = "transform_" + node.__class__.__name__
        visitor = getattr(self, method, self.generic_visit)
        transform_res = visitor(node)

        # restore position state so nested transforms don't leak
        self.current_lineno, self.current_col_offset = old_lineno, old_col_offset

        return transform_res

    def match_tir_namespace(self, identifier: str) -> bool:
        """Check if the namespace is equal to tvm.script.tir"""
        return identifier in self.tir_namespace

    def report_error(self, message: str, span: Union[ast.Span, tvm.ir.Span]):
        """Report an error occuring at a location.

        This just dispatches to synr's DiagnosticContext.

        Parameters
        ----------
        message : str
            Error message
        span : Union[synr.ast.Span, tvm.ir.Span]
            Location of the error
        """
        if isinstance(span, tvm.ir.Span):
            span = synr_span_from_tvm(span)
        self.error(message, span)

    def parse_body(self, parent):
        """Parse remaining statements in this scope.

        Parameters
        ----------
        parent : synr.ast.Node
            Parent node of this scope. Errors will be reported here.
        """
        body = []
        spans = []
        stmt = parent
        while len(self.context.node_stack[-1]) > 0:
            stmt = self.context.node_stack[-1].pop()
            spans.append(stmt.span)
            res = self.transform(stmt)
            if res is not None:
                body.append(res)
        if len(body) == 0:
            self.report_error(
                "Expected another statement at the end of this block. Perhaps you "
                "used a concise statement and forgot to include a body afterwards.",
                stmt.span,
            )
        else:
            return (
                tvm.tir.SeqStmt(body, tvm_span_from_synr(ast.Span.union(spans)))
                if len(body) > 1
                else body[0]
            )

    def parse_arg_list(self, func, node_call):
        """Match the arguments of a function call in the AST to the required
        arguments of the function. This handles positional arguments,
        positional arguments specified by name, keyword arguments, and
        varargs.

        Parameters
        ----------
        func : Function
            The function that provides the signature

        node_call: ast.Call
            The AST call node that calls into the function.

        Returns
        -------
        arg_list : list
            The parsed positional argument.
        """
        assert isinstance(node_call, ast.Call)
        # collect arguments
        args = [self.transform(arg) for arg in node_call.params]
        kw_args = {
            self.transform(k): self.transform(v) for k, v in node_call.keyword_params.items()
        }
        # get the name and parameter list of func
        if isinstance(func, (Intrin, ScopeHandler, SpecialStmt)):
            func_name, param_list = func.signature()
        else:
            self.report_error(
                "Internal Error: function must be of type Intrin, ScopeHandler or SpecialStmt, "
                f"but it is {type(func).__name__}",
                node_call.span,
            )
        # check arguments and parameter list and get a list of arguments
        reader = CallArgumentReader(func_name, args, kw_args, self, node_call)
        pos_only, kwargs, varargs = param_list
        internal_args = list()
        for i, arg_name in enumerate(pos_only):
            internal_args.append(reader.get_pos_only_arg(i + 1, arg_name))
        for i, arg_info in enumerate(kwargs):
            arg_name, default = arg_info
            internal_args.append(reader.get_kwarg(i + 1 + len(pos_only), arg_name, default=default))
        if varargs is not None:
            internal_args.extend(reader.get_varargs(len(pos_only) + len(kwargs) + 1))
        elif len(args) + len(kw_args) > len(pos_only) + len(kwargs):
            self.report_error(
                "Arguments mismatched. "
                + f"Expected {len(pos_only) + len(kwargs)} args but got "
                + f"{len(args) + len(kw_args)}",
                node_call.span,
            )
        return internal_args

    def parse_type(self, type_node, parent):
        """Parse a type annotation.

        We require the parent object to the type so that we have a place to
        report the error message if the type does not exist.
        """
        if type_node is None:
            self.report_error("A type annotation is required", parent.span)
        res_type = self.transform(type_node)
        return tvm.ir.TupleType([]) if res_type is None else res_type.evaluate()

    def generic_visit(self, node):
        """Fallback visitor if node type is not handled. Reports an error."""
        self.report_error(type(node).__name__ + " AST node is not supported", node.span)

    def transform_Module(self, node):
        """Module visitor

        Right now, we only support two formats for TVM Script.

        Example
        -------
        1. Generate a PrimFunc (If the code is printed, then it may also
           contain metadata)

        .. code-block:: python

            import tvm

            @tvm.script
            def A(...):
                ...

            # returns a PrimFunc
            func = A

        2. Generate an IRModule

        .. code-block:: python

            import tvm

            @tvm.script.ir_module
            class MyMod():
                @T.prim_func
                def A(...):
                    ...
                @T.prim_func
                def B(...):
                    ...
                __tvm_meta__ = ...

            # returns an IRModule
            mod = MyMod
        """
        if len(node.funcs) == 1:
            return self.transform(next(iter(node.funcs.values())))
        # BUGFIX: was `node.func`, which does not exist on a synr Module and
        # raised AttributeError instead of reporting the intended error.
        elif len(node.funcs) == 0:
            self.report_error(
                "You must supply at least one class or function definition", node.span
            )
        else:
            self.report_error(
                "Only one-function, one-class or function-with-meta source code is allowed",
                ast.Span.union([x.span for x in list(node.funcs.values())[1:]]),
            )

    def transform_Class(self, node):
        """Class definition visitor.

        A class can have multiple function definitions and a single
        :code:`__tvm_meta__` statement. Each class corresponds to a single
        :code:`IRModule`.

        Example
        -------
        .. code-block:: python

            @tvm.script.ir_module
            class MyClass:
                __tvm_meta__ = {}
                def A():
                    T.evaluate(0)
        """
        if len(node.assignments) == 1:
            if not (
                len(node.assignments[0].lhs) == 1
                and isinstance(node.assignments[0].lhs[0], ast.Var)
                and node.assignments[0].lhs[0].id.name == "__tvm_meta__"
            ):
                self.report_error(
                    "The only top level assignments allowed are `__tvm_meta__ = ...`",
                    node.assignments[0].span,
                )
            self.init_meta(
                MetaUnparser().do_transform(node.assignments[0].rhs, self._diagnostic_context)
            )
        elif len(node.assignments) > 1:
            self.report_error(
                "Only a single top level `__tvm_meta__` is allowed",
                ast.Span.union([x.span for x in node.assignments[1:]]),
            )

        return IRModule(
            {GlobalVar(name): self.transform(func) for name, func in node.funcs.items()}
        )

    def transform_Function(self, node):
        """Function definition visitor.

        Each function definition is translated to a single :code:`PrimFunc`.

        There are a couple restrictions on TVM Script functions:
        1. Function arguments must have their types specified.
        2. The body of the function can contain :code:`func_attr` to specify
           attributes of the function (like it's name).
        3. The body of the function can also contain multiple
           :code:`buffer_bind`s, which give shape and dtype information to
           arguments.
        4. Return statements are implicit.

        Example
        -------
        .. code-block:: python

            @T.prim_func
            def my_function(x: T.handle):  # 1. Argument types
                T.func_attr({"global_symbol": "mmult"})  # 2. Function attributes
                X_1 = tir.buffer_bind(x, [1024, 1024])  # 3. Buffer binding
                T.evaluate(0)  # 4. This function returns 0
        """

        def check_decorator(decorators: List[ast.Expr]) -> bool:
            """Check the decorator is `T.prim_func"""
            if len(decorators) != 1:
                return False
            d: ast.Expr = decorators[0]
            return (
                isinstance(d, ast.Attr)
                and isinstance(d.object, ast.Var)
                and self.match_tir_namespace(d.object.id.name)
                and d.field.name == "prim_func"
            )

        self.init_function_parsing_env()
        self.context.enter_scope(nodes=node.body.stmts)

        # add parameters of function
        for arg in node.params:
            arg_var = tvm.te.var(arg.name, self.parse_type(arg.ty, arg))
            self.context.update_symbol(arg.name, arg_var, node)
            self.context.func_params.append(arg_var)

        if not check_decorator(node.decorators):
            self.report_error(
                "All functions should be decorated by `T.prim_func`",
                node.span,
            )

        # fetch the body of root block
        body = self.parse_body(node.body)

        # return a tir.PrimFunc
        dict_attr = self.context.func_dict_attr
        ret_type = self.parse_type(node.ret_type, node) if node.ret_type is not None else None
        func = tvm.tir.PrimFunc(
            self.context.func_params,
            body,
            ret_type,
            buffer_map=self.context.func_buffer_map,
            attrs=tvm.ir.make_node("DictAttrs", **dict_attr) if dict_attr else None,
            span=tvm_span_from_synr(node.span),
        )

        # New Scope : Implicit root block
        # Each function contains an implicit root block in TensorIR,
        # so here we need a block scope for it.
        # If the PrimFunc is not a TensorIR func (e.g. TE scheduled func or low-level func),
        # the root block will not be added. The logic to add root block is in `_ffi_api.Complete`

        # Fix the PrimFunc
        # 1. generate root block if necessary
        # 2. generate surrounding loops for blocks if necessary
        func = call_with_error_reporting(
            self.report_error,
            node.span,
            _ffi_api.Complete,
            func,
            self.context.root_alloc_buffers,
        )

        self.context.exit_scope()
        return func

    def transform_Lambda(self, node):
        """Lambda visitor

        Return an array of input parameters and the transformed lambda body.
        """
        self.context.enter_scope(nodes=[node.body])

        # add parameters of the lambda
        arg_vars = []
        for arg in node.params:
            arg_var = tvm.te.var(arg.name)
            arg_vars.append(arg_var)
            self.context.update_symbol(arg.name, arg_var, node)

        # the body of a lambda must be an expr
        if not isinstance(node.body, ast.Expr):
            self.report_error("The body of a lambda must be an expression", node.span)

        # transform the body of the lambda
        body = self.transform(node.body)

        self.context.exit_scope()
        return arg_vars, body

    def transform_Assign(self, node):
        """Assign visitor

        AST abstract grammar:
            Assign(expr* targets, expr value, string? type_comment)

        By now 3 patterns of Assign is supported:
            1. special stmts with return value
                1.1 Buffer = T.match_buffer()/T.buffer_decl()
                1.2 Var = T.var()
                1.3 Var = T.env_thread()
            2. (BufferStore) Buffer[PrimExpr, PrimExpr, ..., PrimExpr] = PrimExpr
            3. (Store)       Var[PrimExpr] = PrimExpr
            4. with scope handlers with concise scoping and var def
                4.1 var = T.allocate()
        """
        if isinstance(node.rhs, ast.Call):
            # Pattern 1 & Pattern 4
            func = self.transform(node.rhs.func_name)
            if isinstance(func, WithScopeHandler):
                if not func.concise_scope or not func.def_symbol:
                    self.report_error(
                        "with scope handler " + func.signature()[0] + " is not suitable here",
                        node.rhs.span,
                    )
                # Pattern 4
                arg_list = self.parse_arg_list(func, node.rhs)
                func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span)
                func.body = self.parse_body(node)
                return func.exit_scope(node, self.context, arg_list, node.rhs.func_name.span)
            elif isinstance(func, SpecialStmt):
                # Pattern 1
                arg_list = self.parse_arg_list(func, node.rhs)
                func.handle(node, self.context, arg_list, node.rhs.func_name.span)
                return self.parse_body(node)
        else:
            value = self.transform(node.rhs)
            if len(node.lhs) == 1 and not isinstance(node.lhs[0], ast.Var):
                # This is a little confusing because it only is true when
                # we have taken this branch. We might need to clarify what
                # exactly is allowed in Assignments in tvmscript.
                self.report_error(
                    "Left hand side of assignment must be an unqualified variable",
                    node.span,
                )
            ast_var = node.lhs[0]
            var = tvm.te.var(
                ast_var.id.name,
                self.parse_type(node.ty, ast_var),
                span=tvm_span_from_synr(ast_var.span),
            )
            self.context.update_symbol(var.name, var, node)
            body = self.parse_body(node)
            self.context.remove_symbol(var.name)
            return tvm.tir.LetStmt(var, value, body, span=tvm_span_from_synr(node.span))

        self.report_error(
            """Assignments should be either
1. A "special statement" with return value
    1.1 Buffer = T.match_buffer()/T.buffer_decl()
    1.2 Var = T.var()
    1.3 Var = T.env_thread()
2. A store into a buffer: Buffer[PrimExpr, PrimExpr, ..., PrimExpr] = PrimExpr
3. A store into a variable: Var[PrimExpr] = PrimExpr
4. A with scope handler with concise scoping and var def
    4.1 var = T.allocate()""",
            node.span,
        )

    def transform_SubscriptAssign(self, node):
        """Visitor for statements of the form :code:`x[1] = 2`."""
        symbol = self.transform(node.params[0])
        indexes = self.transform(node.params[1])
        rhs = self.transform(node.params[2])
        rhs_span = tvm_span_from_synr(node.params[2].span)
        if isinstance(symbol, tvm.tir.Buffer):
            # BufferStore
            return tvm.tir.BufferStore(
                symbol,
                tvm.runtime.convert(rhs, span=rhs_span),
                indexes,
                span=tvm_span_from_synr(node.span),
            )
        else:
            if symbol.dtype == "handle" and len(indexes) != 1:
                self.report_error(
                    "Handles only support one-dimensional indexing. Use `T.match_buffer` to "
                    "construct a multidimensional buffer from a handle.",
                    node.params[0].span,
                )
            if len(indexes) != 1:
                self.report_error(
                    f"Store is only allowed with one index, but {len(indexes)} were provided.",
                    node.params[1].span,
                )
            # Store
            return tvm.tir.Store(
                symbol,
                tvm.runtime.convert(rhs, span=rhs_span),
                indexes[0],
                tvm.runtime.convert(True, span=tvm_span_from_synr(node.span)),
                span=tvm_span_from_synr(node.span),
            )

    def transform_Assert(self, node):
        """Assert visitor

        Pattern corresponds to concise mode of :code:`with T.Assert()`.
        """
        condition = self.transform(node.condition)
        if node.msg is None:
            self.report_error("Assert statements must have an error message.", node.span)
        message = self.transform(node.msg)
        body = self.parse_body(node)
        return tvm.tir.AssertStmt(
            condition, tvm.runtime.convert(message), body, span=tvm_span_from_synr(node.span)
        )

    def transform_For(self, node):
        """For visitor

        AST abstract grammar:
            For(expr target, expr iter, stmt* body, stmt* orelse, string? type_comment)

        By now 1 pattern of For is supported:
            1. for scope handler
                for name in T.serial()/T.parallel()/T.vectorized()/T.unroll()/range()/
                            T.grid()/T.thread_binding()
        """
        if not isinstance(node.rhs, ast.Call):
            self.report_error("The loop iterator should be a function call.", node.rhs.span)
        func = self.transform(node.rhs.func_name)
        if not isinstance(func, ForScopeHandler):
            self.report_error(
                "Only For scope handlers can be used in a for statement.", node.rhs.func_name.span
            )

        # prepare for new for scope
        old_lineno, old_col_offset = self.current_lineno, self.current_col_offset
        self.current_lineno = node.span.start_line
        self.current_col_offset = node.span.start_column
        self.context.enter_scope(nodes=node.body.stmts)

        # for scope handler process the scope
        arg_list = self.parse_arg_list(func, node.rhs)
        func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span)
        func.body = self.parse_body(node)
        res = func.exit_scope(node, self.context, arg_list, node.rhs.func_name.span)

        # exit the scope
        self.context.exit_scope()
        self.current_lineno, self.current_col_offset = old_lineno, old_col_offset
        return res

    def transform_While(self, node):
        """While visitor

        AST abstract grammar:
            While(expr condition, stmt* body)
        """
        condition = self.transform(node.condition)
        # body
        self.context.enter_scope(nodes=node.body.stmts)
        body = self.parse_body(node)
        self.context.exit_scope()

        return tvm.tir.While(condition, body, span=tvm_span_from_synr(node.span))

    def transform_With(self, node):
        """With visitor

        AST abstract grammar:
            With(withitem* items, stmt* body, string? type_comment)
            withitem = (expr context_expr, expr? optional_vars)

        By now 2 patterns of With is supported:
            1. with scope handler with symbol def
                with T.block(*axes)/T.allocate() as targets:
            2. with scope handler without symbol def
                with T.let()/T.Assert()/T.attr()/T.realize()
        """
        if not isinstance(node.rhs, ast.Call):
            self.report_error(
                "The context expression of a `with` statement should be a function call.",
                node.rhs.span,
            )

        func = self.transform(node.rhs.func_name)

        if not isinstance(func, WithScopeHandler):
            self.report_error(
                f"Function {func} cannot be used in a `with` statement.", node.rhs.func_name.span
            )

        # prepare for new block scope
        old_lineno, old_col_offset = self.current_lineno, self.current_col_offset
        self.current_lineno = node.body.span.start_line
        self.current_col_offset = node.body.span.start_column
        self.context.enter_block_scope(nodes=node.body.stmts)

        # with scope handler process the scope
        arg_list = self.parse_arg_list(func, node.rhs)
        func.enter_scope(node, self.context, arg_list, node.rhs.func_name.span)
        func.body = self.parse_body(node)
        res = func.exit_scope(node, self.context, arg_list, node.rhs.func_name.span)

        # exit the scope
        self.context.exit_block_scope()
        self.current_lineno, self.current_col_offset = old_lineno, old_col_offset
        return res

    def transform_If(self, node):
        """If visitor

        AST abstract grammar:
            If(expr test, stmt* body, stmt* orelse)
        """
        condition = self.transform(node.condition)
        # then body
        self.context.enter_scope(nodes=node.true.stmts)
        then_body = self.parse_body(node)
        self.context.exit_scope()

        # else body
        if len(node.false.stmts) > 0:
            self.context.enter_scope(nodes=node.false.stmts)
            else_body = self.parse_body(node)
            self.context.exit_scope()
        else:
            else_body = None

        return tvm.tir.IfThenElse(
            condition, then_body, else_body, span=tvm_span_from_synr(node.span)
        )

    def transform_Call(self, node):
        """Call visitor

        3 different Call patterns are allowed:
            1. Intrin representing a PrimExpr/IterVar
                1.1 tir.int/uint/float8/16/32/64/floormod/floordiv/load/cast/ramp/broadcast/max
                1.2 tir.range/reduce_axis/scan_axis/opaque_axis
            2. tir.Op(dtype, ...)
            3. other callable functions
        """
        if isinstance(node.func_name, ast.Op):
            if node.func_name.name == ast.BuiltinOp.Subscript:
                return self.transform_Subscript(node)
            if node.func_name.name in self._binop_maker:
                lhs = self.transform(node.params[0])
                # There is no supertype for everything that can appear in
                # an expression, so we manually add what we might get here.
                if not isinstance(lhs, (tvm.tir.PrimExpr, BufferSlice)):
                    # We would really like to report a more specific
                    # error here, but this parser contains no distinction
                    # between parsing statements and parsing expressions. All
                    # rules just call `transform`.
                    # BUGFIX: the second literal below was missing its `f`
                    # prefix, so "{type(lhs).__name__}" printed literally.
                    self.report_error(
                        f"Left hand side of binary op must be a PrimExpr, "
                        f"but it is a {type(lhs).__name__}",
                        node.params[0].span,
                    )
                rhs = self.transform(node.params[1])
                if not isinstance(rhs, (tvm.tir.PrimExpr, BufferSlice)):
                    # BUGFIX: same missing `f` prefix as above.
                    self.report_error(
                        f"Right hand side of binary op must be a PrimExpr, "
                        f"but it is a {type(rhs).__name__}",
                        node.params[1].span,
                    )
                return call_with_error_reporting(
                    self.report_error,
                    node.span,
                    lambda node, lhs, rhs, span: self._binop_maker[node.func_name.name](
                        lhs, rhs, span=span
                    ),
                    node,
                    lhs,
                    rhs,
                    tvm_span_from_synr(node.span),
                )
            if node.func_name.name in self._unaryop_maker:
                rhs = self.transform(node.params[0])
                return self._unaryop_maker[node.func_name.name](
                    rhs, span=tvm_span_from_synr(node.span)
                )
            self.report_error(f"Unsupported operator {node.func_name.name}.", node.func_name.span)
        else:
            func = self.transform(node.func_name)
            if isinstance(func, Intrin) and not func.stmt:
                # pattern 1
                arg_list = self.parse_arg_list(func, node)
                return call_with_error_reporting(
                    self.report_error,
                    node.func_name.span,
                    func.handle,
                    arg_list,
                    node.func_name.span,
                )
            else:
                args = [self.transform(arg) for arg in node.params]
                kw_args = {
                    self.transform(k): self.transform(v) for k, v in node.keyword_params.items()
                }
                if isinstance(func, tvm.tir.op.Op):
                    if not "dtype" in kw_args.keys():
                        self.report_error(f"{func} requires a dtype keyword argument.", node.span)
                    # pattern 2
                    return tvm.tir.Call(
                        kw_args["dtype"], func, args, span=tvm_span_from_synr(node.span)
                    )
                elif callable(func):
                    # pattern 3
                    return func(*args, **kw_args)
                else:
                    self.report_error(
                        f"Function is neither callable nor a tvm.tir.op.Op (it is a {type(func)}).",
                        node.func_name.span,
                    )

    def transform_UnassignedCall(self, node):
        """Visitor for statements that are function calls.

        This handles function calls that appear on thier own line like
        `tir.realize`.

        Examples
        --------
        .. code-block:: python

            @T.prim_func
            def f():
                A = T.buffer_decl([10, 10])
                T.realize(A[1:2, 1:2], "")  # This is an UnassignedCall
                A[1, 1] = 2  # This is also an UnassignedCall
        """
        # Only allowed builtin operator that can be a statement is x[1] = 3 i.e. subscript assign.
        if isinstance(node.call.func_name, ast.Op):
            if node.call.func_name.name != ast.BuiltinOp.SubscriptAssign:
                self.report_error(
                    "Binary and unary operators are not allowed as a statement", node.span
                )
            else:
                return self.transform_SubscriptAssign(node.call)

        # handle a regular function call
        func = self.transform(node.call.func_name)
        arg_list = self.parse_arg_list(func, node.call)

        if isinstance(func, tir.scope_handler.AssertHandler):
            self.report_error(
                "A standalone `T.Assert` is not allowed. Use `assert condition, message` "
                "instead.",
                node.call.func_name.span,
            )

        if isinstance(func, Intrin):
            if func.stmt:
                return call_with_error_reporting(
                    self.report_error,
                    node.call.func_name.span,
                    func.handle,
                    arg_list,
                    node.call.func_name.span,
                )
            else:
                self.report_error("This intrinsic cannot be used as a statement.", node.call.span)
        elif isinstance(func, WithScopeHandler) and func.concise_scope and not func.def_symbol:
            func.enter_scope(node, self.context, arg_list, node.call.func_name.span)
            func.body = self.parse_body(node)
            return func.exit_scope(node, self.context, arg_list, node.call.func_name.span)
        elif isinstance(func, SpecialStmt) and not func.def_symbol:
            func.handle(node, self.context, arg_list, node.call.func_name.span)
            return

        self.report_error(
            "Unexpected statement. Expected an assert, an intrinsic, a with statement, or a "
            f"special statement, but got {type(func).__name__}.",
            node.call.func_name.span,
        )

    def transform_Slice(self, node):
        # Only unit-step slices are representable in TIR ranges.
        start = self.transform(node.start)
        end = self.transform(node.end)
        if not (isinstance(node.step, ast.Constant) and node.step.value == 1):
            self.report_error("Only step size 1 is supported for slices.", node.step.span)
        return Slice(start, end)

    def transform_Subscript(self, node):
        """Array access visitor.

        By now only 3 types of Subscript are supported:
            1. Buffer[index, index, ...], Buffer element access(BufferLoad & BufferStore)
               Var[index] Buffer element access()
            2. Buffer[start: stop, start: stop, ...], BufferRealize(realize(buffer[...]))
            3. Array[index], Buffer element access
        """
        symbol = self.transform(node.params[0])
        if symbol is None:
            self.report_error(
                f"Variable {node.params[0].id.name} is not defined.", node.params[0].span
            )

        indexes = [self.transform(x) for x in node.params[1].values]
        if isinstance(symbol, tvm.tir.expr.Var):
            if symbol.dtype == "handle":
                self.report_error(
                    "Cannot read directly from a handle, use `T.match_buffer` "
                    "to create a buffer to read from.",
                    node.params[0].span,
                )
            if len(indexes) > 1:
                self.report_error(
                    "Only a single index can be provided when indexing into a `var`.",
                    node.params[1].span,
                )
            index = indexes[0]
            if not isinstance(index, (tvm.tir.PrimExpr, int)):
                # BUGFIX: concatenating str + type raised TypeError while
                # trying to report the error; wrap the type in str().
                self.report_error(
                    "Var load index should be an int or PrimExpr, but it is a "
                    + str(type(index)),
                    node.span,
                )

            return call_with_error_reporting(
                self.report_error,
                node.span,
                tvm.tir.Load,
                "float32",
                symbol,
                index,
                True,
                span=tvm_span_from_synr(node.span),
            )
        elif isinstance(symbol, tvm.tir.Buffer):
            return BufferSlice(
                symbol, indexes, self.report_error, span=tvm_span_from_synr(node.span)
            )
        elif isinstance(symbol, tvm.container.Array):
            if len(indexes) > 1:
                self.report_error(
                    "Array access should be one-dimension access, but the indices are "
                    + str(indexes),
                    node.span,
                )
            index = indexes[0]
            if not isinstance(index, (int, tvm.tir.expr.IntImm)):
                # BUGFIX: same str + type concatenation as above.
                self.report_error(
                    "Array access index expected int or IntImm, but got " + str(type(index)),
                    node.span,
                )
            if int(index) >= len(symbol):
                self.report_error(
                    f"Array access out of bound, size: {len(symbol)}, got index {index}.",
                    node.span,
                )
            return symbol[int(index)]
        else:
            self.report_error(
                f"Cannot subscript from a {type(symbol).__name__}. Only variables and "
                "buffers are supported.",
                node.params[0].span,
            )

    def transform_Attr(self, node):
        """Visitor for field access of the form `x.y`.

        This visitor is used to lookup function and symbol names. We have two
        cases to handle here:
        1. If we have a statement of the form `tir.something`, then we lookup
           `tir.something` in the `Registry`. If the function is not in the
           registry, then we try to find a `tvm.ir.op.Op` with the same name.
        2. All other names `tvm.something` are lookup up in this current
           python namespace.
        """

        def get_full_attr_name(node: ast.Attr) -> str:
            reverse_field_names = [node.field.name]
            while isinstance(node.object, ast.Attr):
                node = node.object
                reverse_field_names.append(node.field.name)
            if isinstance(node.object, ast.Var):
                reverse_field_names.append(node.object.id.name)
            return ".".join(reversed(reverse_field_names))

        if isinstance(node.object, (ast.Var, ast.Attr)):
            full_attr_name = get_full_attr_name(node)
            attr_object, fields = full_attr_name.split(".", maxsplit=1)
            if self.match_tir_namespace(attr_object):
                func_name = "tir." + fields
                res = Registry.lookup(func_name)
                if res is not None:
                    return res
                try:
                    return tvm.ir.op.Op.get(func_name)
                except TVMError as e:
                    # Check if we got an attribute error
                    # BUGFIX: str.find returns -1 (truthy) when absent and 0
                    # (falsy) when the match is at the start, so the bare
                    # truthiness test took the wrong branch in both cases.
                    if e.args[0].find("AttributeError") != -1:
                        self.report_error(f"Unregistered function `tir.{fields}`.", node.span)
                    else:
                        raise e

        symbol = self.transform(node.object)
        if symbol is None:
            self.report_error("Unsupported Attribute expression.", node.object.span)
        if not hasattr(symbol, node.field.name):
            self.report_error(
                f"Type {type(symbol)} does not have a field called `{node.field.name}`.", node.span
            )
        res = getattr(symbol, node.field.name)
        return res

    def transform_TypeAttr(self, node):
        """Visitor for field access of the form `x.y` for types.

        We have two cases here:
        1. If the type is of the form `T.something`, we look up the type in
           the `tir` namespace in this module.
        2. If the type is of the form `tvm.x.something` then we look up
           `tvm.x.something` in this modules namespace.
        """
        if isinstance(node.object, ast.TypeVar):
            if self.match_tir_namespace(node.object.id.name):
                if not hasattr(tir, node.field.name):
                    self.report_error(
                        f"Invalid type annotation `tir.{node.field.name}`.", node.span
                    )
                return getattr(tir, node.field.name)

        symbol = self.transform(node.object)
        if symbol is None:
            self.report_error("Unsupported Attribute expression", node.object.span)
        # NOTE(review): the fallback below passes `node.field` (not
        # `node.field.name`) to hasattr/getattr, unlike transform_Attr above.
        # Preserved as-is — confirm node.field is a string in this path.
        if not hasattr(symbol, node.field):
            self.report_error(
                f"Type {type(symbol)} does not have a field called `{node.field}`.", node.span
            )
        res = getattr(symbol, node.field)
        return res

    def transform_DictLiteral(self, node):
        """Dictionary literal visitor.

        Handles dictionary literals of the form `{x:y, z:2}`.
        """
        keys = [self.transform(key) for key in node.keys]
        values = [self.transform(value) for value in node.values]
        return dict(zip(keys, values))

    def transform_Tuple(self, node):
        """Tuple visitor.

        Handles tuples of the form `(x, y, 2)`.
        """
        return tuple(self.transform(element) for element in node.values)

    def transform_ArrayLiteral(self, node):
        """List literal visitor.

        Handles lists of the form `[x, 2, 3]`.
        """
        return [self.transform(element) for element in node.values]

    def transform_Var(self, node):
        """Variable visitor

        Handles variables like `x` in `x = 2`.
        """
        name = node.id.name
        if name == "meta":
            return self.meta
        symbol = Registry.lookup(name)
        if symbol is not None:
            return symbol
        symbol = self.context.lookup_symbol(name)
        if symbol is not None:
            return symbol
        self.report_error(f"Unknown identifier {name}.", node.span)

    def transform_TypeVar(self, node):
        """Type variable visitor.

        Equivalent to `transform_Var` but for types.
        """
        name = node.id.name
        symbol = Registry.lookup(name) or self.context.lookup_symbol(name)
        if symbol is not None:
            return symbol
        self.report_error(f"Unknown identifier {name}.", node.span)

    def transform_Constant(self, node):
        """Constant value visitor.

        Constant values include `None`, `"strings"`, `2` (integers),
        `4.2` (floats), and `true` (booleans).
        """
        return tvm.runtime.convert(node.value, span=tvm_span_from_synr(node.span))

    def transform_TypeConstant(self, node):
        """Constant value visitor for types.

        See `transform_Constant`.
        """
        return node.value

    def transform_Return(self, node):
        self.report_error(
            "TVM script does not support return statements. Instead the last statement in any "
            "block is implicitly returned.",
            node.span,
        )
def wsdl2py(args=None):
    """
    A utility for automatically generating client interface code from a wsdl
    definition, and a set of classes representing element declarations and
    type definitions.  This will produce two files in the current working
    directory named after the wsdl definition name.

    eg. <definition name='SampleService'>
        SampleService.py
        SampleService_types.py

    @param args: optional argv-style list; when None, sys.argv is parsed.
    """
    op = optparse.OptionParser(usage="usage: %prog [options]",
                               description=wsdl2py.__doc__)
    # Basic options: exactly one of --file / --url must be given (checked below).
    op.add_option("-f", "--file",
                  action="store", dest="file", default=None, type="string",
                  help="FILE to load wsdl from")
    op.add_option("-u", "--url",
                  action="store", dest="url", default=None, type="string",
                  help="URL to load wsdl from")
    op.add_option("-x", "--schema",
                  action="store_true", dest="schema", default=False,
                  help="process just the schema from an xsd file [no services]")
    op.add_option("-d", "--debug",
                  action="callback", callback=SetDebugCallback,
                  help="debug output")

    # WS Options
    op.add_option("-a", "--address",
                  action="store_true", dest="address", default=False,
                  help="ws-addressing support, must include WS-Addressing schema.")

    # pyclass Metaclass
    op.add_option("-b", "--complexType",
                  action="callback", callback=SetPyclassMetaclass,
                  callback_kwargs={'module':'ZSI.generate.pyclass',
                                   'metaclass':'pyclass_type'},
                  help="add convenience functions for complexTypes, including Getters, Setters, factory methods, and properties (via metaclass). *** DONT USE WITH --simple-naming ***")

    # Lazy Evaluation of Typecodes (done at serialization/parsing when needed).
    op.add_option("-l", "--lazy",
                  action="callback", callback=SetUpLazyEvaluation,
                  callback_kwargs={},
                  help="EXPERIMENTAL: recursion error solution, lazy evalution of typecodes")

    # Use Twisted
    op.add_option("-w", "--twisted",
                  action="callback", callback=SetUpTwistedClient,
                  callback_kwargs={'module':'ZSI.generate.pyclass',
                                   'metaclass':'pyclass_type'},
                  help="generate a twisted.web client, dependencies python>=2.4, Twisted>=2.0.0, TwistedWeb>=0.5.0")

    # Extended generation options
    op.add_option("-e", "--extended",
                  action="store_true", dest="extended", default=False,
                  help="Do Extended code generation.")
    op.add_option("-z", "--aname",
                  action="store", dest="aname", default=None, type="string",
                  help="pass in a function for attribute name creation")
    op.add_option("-t", "--types",
                  action="store", dest="types", default=None, type="string",
                  help="file to load types from")
    op.add_option("-o", "--output-dir",
                  action="store", dest="output_dir", default=".", type="string",
                  help="Write generated files to OUTPUT_DIR")
    op.add_option("-s", "--simple-naming",
                  action="store_true", dest="simple_naming", default=False,
                  help="Simplify generated naming.")
    op.add_option("-c", "--clientClassSuffix",
                  action="store", dest="clientClassSuffix", default=None, type="string",
                  help="Suffix to use for service client class (default \"SOAP\")")
    op.add_option("-m", "--pyclassMapModule",
                  action="store", dest="pyclassMapModule", default=None, type="string",
                  help="Python file that maps external python classes to a schema type. The classes are used as the \"pyclass\" for that type. The module should contain a dict() called mapping in the format: mapping = {schemaTypeName:(moduleName.py,className) }")

    if args is None:
        (options, args) = op.parse_args()
    else:
        (options, args) = op.parse_args(args)

    # Exactly one source must be specified: xor of the two "is None" tests.
    if not xor(options.file is None, options.url is None):
        print 'Must specify either --file or --url option'
        sys.exit(os.EX_USAGE)

    location = options.file
    if options.url is not None:
        location = options.url

    # --schema processes a bare xsd; otherwise a full WSDL is read.
    if options.schema is True:
        reader = XMLSchema.SchemaReader(base_url=location)
    else:
        reader = WSDLTools.WSDLReader()

    load = reader.loadFromFile
    if options.url is not None:
        load = reader.loadFromURL

    wsdl = None
    try:
        wsdl = load(location)
    except Exception, e:
        print "Error loading %s: \n\t%s" % (location, e)
        # exit code UNIX specific, Windows?
        if hasattr(os, 'EX_NOINPUT'):
            sys.exit(os.EX_NOINPUT)
        sys.exit("error loading %s" %location)
def maximizingXor(l, r):
    """Return the maximum value of ``a ^ b`` over all pairs with l <= a <= b <= r.

    Parameters
    ----------
    l, r : int
        Inclusive bounds of the range; requires ``l <= r``.

    Returns
    -------
    int
        The maximum pairwise XOR.  When ``l == r`` the only pair is (l, l)
        and the result is 0 (x ^ x == 0).
    """
    # combinations_with_replacement is kept so the l == r case still yields
    # one pair; no list() materialization is needed — max() consumes the
    # generator directly, and `a ^ b` is clearer than operator.xor(*pair).
    return max(a ^ b
               for a, b in itertools.combinations_with_replacement(range(l, r + 1), 2))
def xor(var1, var2):
    """Element-wise XOR of two equal-length bit sequences.

    Parameters
    ----------
    var1, var2 : sequence of int/bool
        Must have the same length (enforced by the assert, preserving the
        original AssertionError failure mode for callers).

    Returns
    -------
    list
        ``[var1[i] ^ var2[i] for each i]``; empty input yields ``[]``.
    """
    assert (len(var1) == len(var2))
    # zip() pairs the elements directly — no index-based range(len(...)) loop;
    # `a ^ b` is identical to operator.xor(a, b).
    return [a ^ b for a, b in zip(var1, var2)]
def test_bitwise_xor(self): self.failUnless(operator.xor(0xb, 0xc) == 0x7)
# Top-level training loop for a small 2-layer XOR network.
# Relies on module globals defined elsewhere in the file: learned_flag, roop0,
# Input, index, Param_ij, Param_jk, calc_output, E_fw_log, np, xor.
# NOTE(review): the while-body may continue beyond this chunk — the loop has
# no visible termination update for learned_flag here; confirm downstream.
step = 0
epoch = 0
while learned_flag == False:
    # Wrap roop0 into [0, Input.size/2) — presumably one pass over the
    # sample pairs; TODO confirm Input layout (np array, samples on axis 1?).
    roop0 = int(roop0 % (Input.size / 2))
    # Forward pass: pick two random input indices, run both hidden units,
    # then the output unit.
    idx_a_rdm = int(np.random.choice(index, 1, replace=False))
    idx_b_rdm = int(np.random.choice(index, 1, replace=False))
    ya, s_ij_a = calc_output(Input.T[idx_a_rdm], Input.T[idx_b_rdm],
                             Param_ij[0].W, Param_ij[0].theta)
    yb, s_ij_b = calc_output(Input.T[idx_a_rdm], Input.T[idx_b_rdm],
                             Param_ij[1].W, Param_ij[1].theta)
    z, s_jk = calc_output(ya, yb, Param_jk.W, Param_jk.theta)
    print("z =", z)
    # Forward error: network output minus the XOR of the two inputs
    # (squared-error objective).
    delta_fw = z - xor(int(Input.T[idx_a_rdm]), int(Input.T[idx_b_rdm]))
    E_fw = delta_fw**2
    E_fw_log.append(E_fw)
    print("Eroor = ", E_fw)
    # Backward pass ... (original note, translated from Japanese:
    # "are there (1 + 2) error terms?")
    delta_bw_jk = (Param_jk.W[0] + Param_jk.W[1]) * delta_fw
    delta_bw_ij_a = Param_ij[0].W[0] * delta_bw_jk + Param_ij[0].W[
        1] * delta_fw
    delta_bw_ij_b = Param_ij[1].W[0] * delta_bw_jk + Param_ij[1].W[
        1] * delta_fw
    E_bw_jk = delta_bw_jk**2
    E_bw_ij_a = delta_bw_ij_a**2
    E_bw_ij_b = delta_bw_ij_b**2
def test_get_TestEnvironment(self):
    """Compare freshly-generated submission scripts against archived reference
    scripts for this test's environment, using pretend-mode submission."""
    # Force asserts to show the full file when failures occur.
    # Useful to debug errors that arise.
    self.maxDiff = None
    # Must import the data into the project.
    with signac.TemporaryProject(name=gen.PROJECT_NAME) as p:
        fp = gen.get_masked_flowproject(p)
        fp.import_from(origin=gen.ARCHIVE_DIR)
        jobs = fp.find_jobs(dict(environment=self.env_name()))
        if not len(jobs):
            raise RuntimeError("No reference data for environment {}!".format(self.env_name()))
        reference = []
        generated = []
        for job in jobs:
            parameters = job.sp.parameters()
            if 'bundle' in parameters:
                # Bundled submission: all named operations go into one script.
                bundle = parameters.pop('bundle')
                # Capture stdout (the generated script) and discard stderr.
                tmp_out = io.TextIOWrapper(
                    io.BytesIO(), sys.stdout.encoding)
                with open(os.devnull, 'w') as devnull:
                    with redirect_stderr(devnull):
                        with redirect_stdout(tmp_out):
                            fp.submit(
                                env=self.env, jobs=[job], names=bundle, pretend=True,
                                force=True, bundle_size=len(bundle), **parameters)
                tmp_out.seek(0)
                msg = "---------- Bundled submission of job {}".format(job)
                generated.extend([msg] + tmp_out.read().splitlines())
                # Reference script name encodes the bundled operation names.
                with open(job.fn('script_{}.sh'.format('_'.join(bundle)))) as file:
                    reference.extend([msg] + file.read().splitlines())
            else:
                # Unbundled: one pretend-submission (and one reference script)
                # per operation.
                for op in fp.operations:
                    if 'partition' in parameters:
                        # Don't try to submit GPU operations to CPU partitions
                        # and vice versa.  We should be able to relax this
                        # requirement if we make our error checking more
                        # consistent.
                        if operator.xor(
                                'gpu' in parameters['partition'].lower(),
                                'gpu' in op.lower()):
                            continue
                    tmp_out = io.TextIOWrapper(
                        io.BytesIO(), sys.stdout.encoding)
                    with open(os.devnull, 'w') as devnull:
                        with redirect_stderr(devnull):
                            with redirect_stdout(tmp_out):
                                fp.submit(
                                    env=self.env, jobs=[job], names=[op], pretend=True,
                                    force=True, **parameters)
                    tmp_out.seek(0)
                    msg = "---------- Submission of operation {} for job {}.".format(op, job)
                    generated.extend([msg] + tmp_out.read().splitlines())
                    with open(job.fn('script_{}.sh'.format(op))) as file:
                        reference.extend([msg] + file.read().splitlines())
        # Single diff over the concatenated scripts keeps the failure output
        # readable (combined with maxDiff = None above).
        self.assertEqual('\n'.join(reference), '\n'.join(generated))
def _create_methods(arith_method, radd_func, comp_method, bool_method,
                    use_numexpr, special=False, default_axis='columns'):
    """Build the dict of arithmetic/comparison/boolean methods for a pandas
    data structure from the supplied method-constructor callables.

    @param arith_method: constructor for arithmetic ops (op, name, str_rep, ...)
    @param radd_func: reflected-add implementation; falls back to operator.add
    @param comp_method: constructor for comparison ops, or None to skip them
    @param bool_method: constructor for boolean (&, |, ^) ops, or None to skip
    @param use_numexpr: when False, the numexpr string representation is
        suppressed (op(...) returns None)
    @param special: True -> dunder names (__add__ etc.), False -> flex names
    @param default_axis: axis passed to flex methods (frames only)
    """
    # creates actual methods based upon arithmetic, comp and bool method
    # constructors.
    # NOTE: Only frame cares about default_axis, specifically: special methods
    # have default axis None, whereas flex methods have default axis 'columns'
    # if we're not using numexpr, then don't pass a str_rep
    if use_numexpr:
        op = lambda x: x
    else:
        op = lambda x: None
    if special:
        # Names ending in "_" (and_, or_, xor) already carry one trailing
        # underscore, so only one more is appended to form the dunder.
        def names(x):
            if x[-1] == "_":
                return "__%s_" % x
            else:
                return "__%s__" % x
    else:
        names = lambda x: x
    radd_func = radd_func or operator.add
    # Inframe, all special methods have default_axis=None, flex methods have
    # default_axis set to the default (columns)
    new_methods = dict(
        add=arith_method(operator.add, names('add'), op('+'),
                         default_axis=default_axis),
        radd=arith_method(radd_func, names('radd'), op('+'),
                          default_axis=default_axis),
        sub=arith_method(operator.sub, names('sub'), op('-'),
                         default_axis=default_axis),
        mul=arith_method(operator.mul, names('mul'), op('*'),
                         default_axis=default_axis),
        truediv=arith_method(operator.truediv, names('truediv'), op('/'),
                             truediv=True, fill_zeros=np.inf,
                             default_axis=default_axis),
        floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'),
                              default_axis=default_axis, fill_zeros=np.inf),
        # Causes a floating point exception in the tests when numexpr
        # enabled, so for now no speedup
        mod=arith_method(operator.mod, names('mod'), None,
                         default_axis=default_axis, fill_zeros=np.nan),
        pow=arith_method(operator.pow, names('pow'), op('**'),
                         default_axis=default_axis),
        # not entirely sure why this is necessary, but previously was included
        # so it's here to maintain compatibility
        rmul=arith_method(operator.mul, names('rmul'), op('*'),
                          default_axis=default_axis, reversed=True),
        rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'),
                          default_axis=default_axis, reversed=True),
        rtruediv=arith_method(lambda x, y: operator.truediv(y, x),
                              names('rtruediv'), op('/'), truediv=True,
                              fill_zeros=np.inf, default_axis=default_axis,
                              reversed=True),
        rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x),
                               names('rfloordiv'), op('//'),
                               default_axis=default_axis, fill_zeros=np.inf,
                               reversed=True),
        rpow=arith_method(lambda x, y: y ** x, names('rpow'), op('**'),
                          default_axis=default_axis, reversed=True),
        rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'),
                          default_axis=default_axis, reversed=True),
    )
    # div/rdiv are aliases of true division.
    new_methods['div'] = new_methods['truediv']
    new_methods['rdiv'] = new_methods['rtruediv']

    # Comp methods never had a default axis set
    if comp_method:
        new_methods.update(dict(
            eq=comp_method(operator.eq, names('eq'), op('==')),
            ne=comp_method(operator.ne, names('ne'), op('!='), masker=True),
            lt=comp_method(operator.lt, names('lt'), op('<')),
            gt=comp_method(operator.gt, names('gt'), op('>')),
            le=comp_method(operator.le, names('le'), op('<=')),
            ge=comp_method(operator.ge, names('ge'), op('>=')),
        ))
    if bool_method:
        new_methods.update(dict(
            and_=bool_method(operator.and_, names('and_'), op('&')),
            or_=bool_method(operator.or_, names('or_'), op('|')),
            # For some reason ``^`` wasn't used in original.
            xor=bool_method(operator.xor, names('xor'), op('^')),
            rand_=bool_method(lambda x, y: operator.and_(y, x),
                              names('rand_'), op('&')),
            ror_=bool_method(lambda x, y: operator.or_(y, x),
                             names('ror_'), op('|')),
            rxor=bool_method(lambda x, y: operator.xor(y, x),
                             names('rxor'), op('^'))
        ))

    # Re-key by the final (possibly dunder) method names.
    new_methods = dict((names(k), v) for k, v in new_methods.items())
    return new_methods
def test_overlap_and_conflict(self, versop_other):
    """
    Test if there is any overlap between this instance and versop_other, and
    if so, if there is a conflict or not.

    Returns 2 booleans: has_overlap, is_conflict

    @param versop_other: a VersionOperator instance

    Examples:
        '> 3' and '> 3' : equal, and thus overlap (no conflict)
        '> 3' and '< 2' : no overlap
        '< 3' and '> 2' : overlap, and conflict (region between 2 and 3 is ambiguous)
        '> 3' and '== 3' : no overlap
        '>= 3' and '== 3' : overlap, and conflict (boundary 3 is ambigous)
        '> 3' and '>= 3' : overlap, no conflict ('> 3' is more strict then '>= 3')
    """
    versop_msg = "this versop %s and versop_other %s" % (self, versop_other)

    # Equal operators trivially overlap and never conflict.
    if self == versop_other:
        self.log.debug("%s are equal. Return overlap True, conflict False." % versop_msg)
        return True, False

    # from here on, this versop and versop_other are not equal
    same_boundary = self.version == versop_other.version
    # Whether each operator's boundary version satisfies the other operator.
    boundary_self_in_other = versop_other.test(self.version)
    boundary_other_in_self = self.test(versop_other.version)

    # Operators are "same family" when both appear in one OPERATOR_FAMILIES
    # group (e.g. both are greater-than-style, or both less-than-style).
    same_family = False
    for fam in self.OPERATOR_FAMILIES:
        fam_op = [self.OPERATOR_MAP[x] for x in fam]
        if self.operator in fam_op and versop_other.operator in fam_op:
            same_family = True

    # Inclusive operators (e.g. >=, <=, ==) include their boundary version.
    include_ops = [self.OPERATOR_MAP[x] for x in self.INCLUDE_OPERATORS]
    self_includes_boundary = self.operator in include_ops
    other_includes_boundary = versop_other.operator in include_ops

    if boundary_self_in_other and boundary_other_in_self:
        # Mutual containment of boundaries => ranges definitely overlap;
        # whether it conflicts depends on boundary handling.
        msg = "Both %s are in each others range" % versop_msg
        if same_boundary:
            # Exactly one side inclusive at a shared boundary is unambiguous.
            if op.xor(self_includes_boundary, other_includes_boundary):
                self.log.debug("%s, one includes boundary and one is strict => overlap, no conflict" % msg)
                return True, False
            else:
                # conflict
                self.log.debug("%s, and both include the boundary => overlap and conflict" % msg)
                return True, True
        else:
            # conflict
            self.log.debug("%s, and different boundaries => overlap and conflict" % msg)
            return True, True
    else:
        # both boundaries not included in one other version expression
        # => never a conflict, only possible overlap
        msg = 'same boundary %s, same family %s;' % (same_boundary, same_family)
        if same_boundary:
            if same_family:
                # overlap if one includes the boundary
                overlap = self_includes_boundary or other_includes_boundary
            else:
                # overlap if they both include the boundary
                overlap = self_includes_boundary and other_includes_boundary
        else:
            # overlap if boundary of one is in other
            overlap = boundary_self_in_other or boundary_other_in_self
        self.log.debug("No conflict between %s; %s overlap %s, no conflict" % (versop_msg, msg, overlap))
        return overlap, False
# NOTE(review): mutable default `env={}` — it is only read (iteritems /
# membership), never mutated, so this is a latent smell rather than an active
# bug; consider env=None with a fallback if this function is ever refactored.
def run_bench(name=None, worker_package_with_default_scenario=None, nodes=None,
              workers_per_node=None, env={}, email=None, should_fail=False,
              max_retries=2, expected_log_message_regex=None,
              check_log_function=None, check_user_log_function=None,
              post_start=None):
    """Start an mzbench scenario via the CLI, wait for it, and validate its
    outcome (optionally retrying); returns the bench id on success.

    Raises RuntimeError when the bench result keeps disagreeing with
    `should_fail` or when a log check fails.  (Python 2 code: uses
    print statements, iteritems, unicode.)
    """
    email_option = ('--email=' + email) if email else ''

    # Node placement: explicit workers-per-node wins over a node list;
    # default is a single node.
    if workers_per_node:
        nodes_option = '--workers_per_node ' + str(workers_per_node)
    else:
        if nodes:
            nodes_option = '--nodes ' + ','.join(nodes)
        else:
            nodes_option = '--nodes 1'

    env_option = ' '.join(('--env={0}={1}'.format(k, v) for k, v in env.iteritems()))

    def run():
        # One start/wait cycle; returns (bench_id, succeeded_bool).
        if 'worker_branch' in env:
            node_commit_arg = '--node_commit={0}'.format(env['worker_branch'])
        else:
            node_commit_arg = ''

        flags = ' '.join([
            '--host=localhost:4800',
            node_commit_arg,
            nodes_option,
            env_option,
            email_option])

        if name is not None:
            invocation = mzbench_dir + 'bin/mzbench ' + flags + ' start ' + name
        else:
            raise RuntimeError('Neither script filename nor default scenario package provided.')

        start = subprocess.Popen(shlex.split(invocation.encode('ascii')),
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        start_out, start_err = start.communicate()

        # The start command prints JSON containing the bench id.
        try:
            bench_id = json.loads(start_out)['id']
        except Exception:
            print 'mzbench returned invalid json: \nCommand: {0}\nOutput: {1}\nStderr: {2}'.format(invocation, start_out, start_err)
            raise

        # Optional hook once the bench reaches 'running' (240s timeout).
        if (post_start is not None) and wait_status(bench_id, 'running', 240):
            print "Calling post start for {0}".format(bench_id)
            post_start(bench_id)

        wait = subprocess.Popen(shlex.split(
            mzbench_dir + 'bin/mzbench --host=localhost:4800 status --wait {0}'.format(bench_id)),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        wait.communicate()

        return (bench_id, wait.returncode == 0)

    attempt = 0
    while attempt < max_retries:
        print 'Attempt #{0}'.format(attempt)

        try:
            (bench_id, success) = run()
        except Exception as e:
            print "Unexpected error: {0}".format(e)
            bench_id, success = (None, False)

        # Outcome matches expectation when exactly one of success/should_fail
        # holds.
        if xor(success, should_fail):
            if not expected_log_message_regex and not check_log_function and not check_user_log_function:
                # no need to check the log
                return bench_id

            log_cmd = mzbench_dir + 'bin/mzbench --host=localhost:4800 log {0}'.format(bench_id)
            log = cmd(log_cmd)

            if expected_log_message_regex:
                # Accept either a pattern string or a precompiled regex.
                if isinstance(expected_log_message_regex, str) or isinstance(expected_log_message_regex, unicode):
                    regex = re.compile(expected_log_message_regex, re.DOTALL + re.UNICODE)
                else:
                    regex = expected_log_message_regex
                if not regex.search(log):
                    print
                    print u"Log doesn't contain expected log message '{0}':\n".format(regex.pattern)
                    print log
                    raise RuntimeError

            if check_log_function:
                maybe_error = check_log_function(log)
                if maybe_error:
                    print
                    print "Log doesn't pass custom check:\n{0}\n\n".format(maybe_error)
                    print log
                    raise RuntimeError

            if check_user_log_function:
                # User log is fetched separately via the 'userlog' subcommand.
                log_cmd = mzbench_dir + 'bin/mzbench --host=localhost:4800 userlog {0}'.format(bench_id)
                log = cmd(log_cmd)
                maybe_error = check_user_log_function(log)
                if maybe_error:
                    print
                    print "Log doesn't pass custom check:\n{0}\n\n".format(maybe_error)
                    print log
                    raise RuntimeError

            return bench_id

        print 'Attempt #{0} for bench-id {1} unexpectedly {2}, retrying.'.format(attempt, bench_id, 'succeeded' if should_fail else 'failed')
        attempt += 1

    # All retries exhausted: dump the last bench log and give up.
    if (max_retries <= attempt):
        print('All {0} attempts failed'.format(max_retries))
        print('Log of the last attempt (bench {0}):'.format(bench_id))
        if bench_id is not None:
            log_cmd = mzbench_dir + 'bin/mzbench --host=localhost:4800 log {0}'.format(bench_id)
            print cmd(log_cmd).replace('\\n', '\n')
        raise RuntimeError('BenchId {0} for test {1} unexpectedly {2}'.format(
            bench_id, name, 'succeeded' if should_fail else 'failed'))
def _apply_operation(self, result): return operator.xor(result[0], result[1])
def testOperators(self):
    """Exercise the Python operator overloads on tf.Variable (float, int,
    bool, and slicing) and check each result against the expected value."""
    with self.test_session():
        # Float variable: arithmetic, reflected arithmetic, comparisons,
        # unary ops.
        var_f = tf.Variable([2.0])
        add = var_f + 0.0
        radd = 1.0 + var_f
        sub = var_f - 1.0
        rsub = 1.0 - var_f
        mul = var_f * 10.0
        rmul = 10.0 * var_f
        div = var_f / 10.0
        rdiv = 10.0 / var_f
        lt = var_f < 3.0
        rlt = 3.0 < var_f
        le = var_f <= 2.0
        rle = 2.0 <= var_f
        gt = var_f > 3.0
        rgt = 3.0 > var_f
        ge = var_f >= 2.0
        rge = 2.0 >= var_f
        neg = -var_f
        abs_v = abs(var_f)

        # Integer variable: modulo both ways.
        var_i = tf.Variable([20])
        mod = var_i % 7
        rmod = 103 % var_i

        # Boolean variable: logical ops via the operator module and ~.
        var_b = tf.Variable([True, False])
        and_v = operator.and_(var_b, [True, True])
        or_v = operator.or_(var_b, [False, True])
        xor_v = operator.xor(var_b, [False, False])
        invert_v = ~var_b

        # Tensor variable: __getitem__ slicing (empty slice on axis 1).
        rnd = np.random.rand(4, 4).astype("f")
        var_t = tf.Variable(rnd)
        slice_v = var_t[2, 0:0]

        # All ops above are graph nodes; evaluate after initialization.
        tf.initialize_all_variables().run()
        self.assertAllClose([2.0], add.eval())
        self.assertAllClose([3.0], radd.eval())
        self.assertAllClose([1.0], sub.eval())
        self.assertAllClose([-1.0], rsub.eval())
        self.assertAllClose([20.0], mul.eval())
        self.assertAllClose([20.0], rmul.eval())
        self.assertAllClose([0.2], div.eval())
        self.assertAllClose([5.0], rdiv.eval())
        self.assertAllClose([-2.0], neg.eval())
        self.assertAllClose([2.0], abs_v.eval())
        self.assertAllClose([True], lt.eval())
        self.assertAllClose([False], rlt.eval())
        self.assertAllClose([True], le.eval())
        self.assertAllClose([True], rle.eval())
        self.assertAllClose([False], gt.eval())
        self.assertAllClose([True], rgt.eval())
        self.assertAllClose([True], ge.eval())
        self.assertAllClose([True], rge.eval())
        self.assertAllClose([6], mod.eval())
        self.assertAllClose([3], rmod.eval())
        self.assertAllClose([True, False], and_v.eval())
        self.assertAllClose([True, True], or_v.eval())
        self.assertAllClose([True, False], xor_v.eval())
        self.assertAllClose([False, True], invert_v.eval())
        self.assertAllClose(rnd[2, 0:0], slice_v.eval())
def are_compatible(self, previous, current):
    """
    Verify the grammatical relation between the two words.
    (Translated from the original Arabic note: study the syntactic cohesion
    between the two words, i.e. their agreement in number, gender and case.)
    If the current word is related with the previous word, return True.
    The previous word can contain a pointer to the next word. the current
    can have a pointer to the previous if they ara realated
    @param previous: the previous stemmed word, choosen by the tashkeel process.
    @type previous:stemmedSynWord class
    @param current: the current stemmed word.
    @type current:stemmedSynWord class
    @return: return if the two words are related syntaxicly.
    @rtype: boolean
    """
    # Both words must be nouns (the previous may also be an annexed/addition form).
    if (not ((previous.is_noun() or previous.is_addition()) and current.is_noun())):
        return False
    compatible = False
    # Definiteness: either both defined, or both indefinite.
    if not xor(current.is_defined(), previous.is_defined()):
        compatible = True
    else:
        return False
    # Tanwin (nunation) must agree.
    if not xor(current.is_tanwin(), previous.is_tanwin()):
        compatible = True
    else:
        return False
    # Gender agreement: masculine with masculine, feminine with feminine
    # (broken plurals are treated as feminine — see the number check below).
    if ((current.is_feminin() and previous.is_feminin()) or
            (current.is_masculin() and previous.is_masculin())):
        compatible = True
    else:
        return False
    # Number agreement: both singular, both dual, or both plural;
    # special case: feminine singular current with feminine plural previous.
    if ((current.is_plural() and previous.is_plural()) or
            (current.is_dual() and previous.is_dual()) or
            (current.is_single() and previous.is_single()) or
            (current.is_single() and current.is_feminin() and
             previous.is_plural() and previous.is_feminin())):
        compatible = True
    else:
        return False
    # Case agreement: both genitive (majrour), both accusative (mansoub),
    # or both nominative (marfou3).
    if (current.is_majrour() and previous.is_majrour()) \
        or (current.is_mansoub() and previous.is_mansoub()) \
        or (current.is_marfou3()and previous.is_marfou3()):
        compatible = True
    else:
        return False
    # The second word must carry no procletic other than the definite article
    # (this check deliberately comes after the definiteness check above).
    if not current.has_procletic() or current.get_procletic() in (
            u"ال", u"فال", u"وال", u"و", u"ف"):
        compatible = True
    else:
        return False
    #ToDo: fix feminin and masculin cases
    #~ if not xor (current.is_feminin(), previous.is_feminin()):
    #~     compatible = True
    #~ else:
    #~     return False
    return compatible