def prime_sieve(top=10005):
    """Yield every prime below *top* (Sieve of Eratosthenes).

    top -- exclusive upper bound for the primes produced (default 10005).

    Uses a plain stdlib ``bytearray`` as the composite table instead of a
    third-party bit container: indexing is faster and the marking of
    multiples happens in a single C-speed slice assignment.
    """
    # composite[i] is non-zero once i is known to be a multiple of a
    # smaller prime; everything starts out "possibly prime" (zero).
    composite = bytearray(top)
    for i in range(2, top):
        if not composite[i]:
            yield i
            # i is prime: mark all of its multiples starting at i*i
            # (smaller multiples were already marked by smaller primes).
            composite[i * i::i] = b'\x01' * len(range(i * i, top, i))
def get_bits(length, mode=-1):
    """Generate a bit string of the given length.

    length -- number of bits in the string
    mode   -- 0: all zeros; 1: all ones; -1 (default): random bits

    Returns the bits as a ``'0b'``-prefixed string.  Bug fixed: the
    original returned ``bin(random_num)`` in random mode, which drops
    leading zeros, so the result was not always *length* bits long.
    """
    if mode == 0:
        body = '0' * length
    elif mode == 1:
        body = '1' * length
    else:
        # Uniform random integer in [0, 2**length - 1], rendered with
        # leading zeros so the string width is deterministic.
        body = format(random.getrandbits(length), '0{}b'.format(length))
    return '0b' + body
def prime_sieve(top=100000000):
    """Yield every prime below *top* (Sieve of Eratosthenes).

    Bug fixed: the original yielded inside the sieving loop, which only
    runs up to sqrt(top), so every prime above sqrt(top) was silently
    dropped.  Sieving still stops at sqrt(top) -- marking beyond that is
    redundant -- but the yield pass now covers the whole range.
    """
    composite = bytearray(top)  # non-zero = known composite
    last = int(math.sqrt(top)) + 1
    for i in range(2, last):
        if not composite[i]:
            # i is prime: mark its multiples from i*i upward in one
            # C-speed slice assignment.
            composite[i * i::i] = b'\x01' * len(range(i * i, top, i))
    for i in range(2, top):
        if not composite[i]:
            yield i
def init_prime_bitarray(maxnum):
    """Build the module-level prime table ``primes_ba``.

    After the call, ``primes_ba[n]`` is truthy exactly when ``n`` is
    prime, for every ``n`` from 0 to *maxnum* inclusive.
    """
    global primes_ba
    size = maxnum + 1
    primes_ba = BitArray(size)
    primes_ba.set(1)        # optimistically mark everything prime ...
    primes_ba[0] = 0        # ... then clear the two non-primes below 2
    primes_ba[1] = 0
    # Candidates: 2 plus the odd numbers (even numbers > 2 are cleared
    # while sieving with i == 2).
    for i in [2] + list(range(3, size, 2)):
        if not primes_ba[i]:
            continue        # already known composite -- nothing to sieve
        for multiple in range(2 * i, size, i):
            primes_ba[multiple] = 0
class BloomFilter:
    """Minimal Bloom filter.

    The original implementation was broken in several ways:
    * ``[md5() in range(hash_count)]`` built a one-element list holding a
      boolean instead of *hash_count* hash objects;
    * the bit positions (``h.real``) never depended on the item at all;
    * ``__contains__`` indexed the bit array with a *list*.

    This version derives ``hash_count`` positions per item by salting an
    md5 digest with the hash index, and stores the flags in a stdlib
    ``bytearray``.
    """

    def __init__(self, size, hash_count):
        self.size = size              # number of bit positions
        self.hash_count = hash_count  # hashes (positions) per item
        self.array = bytearray(size)  # one flag byte per position

    def _positions(self, item):
        """Return the hash_count bit positions that represent *item*."""
        positions = []
        for seed in range(self.hash_count):
            digest = md5(('%d:%s' % (seed, item)).encode()).hexdigest()
            positions.append(int(digest, 16) % self.size)
        return positions

    def load(self, items):
        """Insert every element of *items* into the filter."""
        for item in items:
            for pos in self._positions(item):
                self.array[pos] = 1

    def __contains__(self, item):
        # All positions set -> probably present; any clear -> definitely
        # absent (Bloom filters never give false negatives).
        return all(self.array[pos] for pos in self._positions(item))
def answer():
    """Project Euler 10: sum of all primes below two million.

    Sieve of Eratosthenes over a stdlib ``bytearray`` (1 = prime).  The
    per-element Python marking loops of the original are replaced with
    C-speed slice assignments, and sieving stops at sqrt(limit), which
    is sufficient to mark every composite.
    """
    limit = 2000000
    is_prime = bytearray([1]) * limit
    is_prime[0:2] = b'\x00\x00'          # 0 and 1 are not prime
    # Knock out the even numbers above 2 in one pass.
    is_prime[4::2] = b'\x00' * len(range(4, limit, 2))
    for i in range(3, int(limit ** 0.5) + 1, 2):
        if is_prime[i]:
            is_prime[i * i::i] = b'\x00' * len(range(i * i, limit, i))
    # Sum the odd primes, then add the only even prime, 2.
    return sum(i for i in range(3, limit, 2) if is_prime[i]) + 2
def evaluate(target):
    """Score *target* against the module-level word trie ``t``.

    Returns a pair of BitArrays, each the length of *target*:
      bestPattern -- positions covered by the single longest match found
      good        -- positions covered by any match at all
    """
    n = len(target)
    good = BitArray(n)           # union of every matched span
    best_pattern = BitArray(n)   # span of the longest match so far
    best_length = 0
    for start in range(n):
        # Longest trie word that prefixes the suffix target[start:].
        match = t.longest_prefix(key=target[start:], default="")
        if not match:
            continue
        span = range(start, start + len(match))
        good.set(1, span)
        if len(match) > best_length:
            best_length = len(match)
            best_pattern = BitArray(n)
            best_pattern.set(1, span)
    return best_pattern, good
def get_bitstring(descriptors):
    """Turn a descriptor table into one hashed-fingerprint BitArray per row.

    descriptors -- presumably a pandas DataFrame whose columns are named
    descriptors and whose rows are samples (it exposes ``.columns`` and
    ``.iterrows()``) -- TODO confirm against the caller.

    Relies on the module-level constants ``ACTIVE_BITS`` (number of hash
    slices taken per column name) and ``bs_slice`` (bit width of each
    slice); neither is visible in this chunk.
    """
    d = {}
    result = []
    # Pre-hash every column name: md5 the name, then cut the digest into
    # ACTIVE_BITS unsigned integers of bs_slice bits each.  These are the
    # bit positions the column contributes to a fingerprint.
    for i in descriptors.columns:
        a = md5(i.encode()).digest()
        xx = BitArray(a)
        l = []
        for j in range(ACTIVE_BITS):
            s = xx[j*bs_slice:(j+1)*bs_slice].uint
            l.append(s)
        d[i] = l
    # For each row, OR together the bit positions of every truthy column
    # into a fingerprint of 2**bs_slice bits.
    for _, i in descriptors.iterrows():
        hfp = BitArray(2 ** bs_slice)
        for k, v in i.items():
            if v:
                hfp.set(True, d[k])
        result.append(hfp)
    return result
def analyze(self):
    """Scan the 'integers' file and record the 32-bit values never seen.

    Each line encodes one unsigned little-endian integer: 1 byte ('B'),
    2 bytes ('H') or 4 bytes ('I') depending on the line length.  Every
    value read is marked in a 2**32-bit array; values whose bit is still
    clear afterwards are collected into ``self.missing``.

    Fixes over the original:
    * ``self.missing`` now collects the missing value ``i`` -- the
      original appended ``numbers[i]``, which is always False there;
    * the O(2**32) ``set(False, ...)`` pre-clear is dropped (a freshly
      constructed BitArray is already all-zero);
    * the input file is closed even if parsing raises (``with``).
    """
    numbers = BitArray(2**32)
    with open('integers', 'rb') as f:
        for line in f:
            if len(line) < 2:
                data, = struct.unpack('B', line[:1])
            elif len(line) < 4:
                data, = struct.unpack('H', line[:2])
            else:
                data, = struct.unpack('I', line[:4])
            numbers[data] = True
    self.missing = []
    for i in xrange(2**32):
        if not numbers[i]:
            self.missing.append(i)
MAXN = 1000000005 isHappy = BitArray(MAXN) unHappy = BitArray(MAXN) isHappy[1] = True for case in range(1, 1 + int(input())): try: n = int(input()) except: break path = set() curr = n flag = True while not isHappy[curr] and not unHappy[curr]: if isHappy[curr]: break if unHappy[curr]: flag = False break path.add(curr) curr = sum(map(lambda x: x ** 2, map(int, str(curr)))) if curr in path: flag = False break if flag: isHappy.set(True, path) print 'Case #%d: %d is a Happy number.' % (case, n) else: unHappy.set(True, path) print 'Case #%d: %d is an Unhappy number.' % (case, n)
import sys
import bisect
from bitstring import BitArray

top = 100000
primes = set()
digitPrimes = []
# Sieve of Eratosthenes; a set bit marks a composite number.
b = BitArray(top)
for i in range(2, top):
    if not b[i]:
        primes.add(i)
        # A "digit prime": i is prime and so is the sum of its digits.
        # The digit sum never exceeds i (and i itself was just added),
        # so testing against the still-growing set is safe here.
        if sum(map(int, str(i))) in primes:
            digitPrimes.append(i)
        # i is prime, so mark all of its multiples as composite.
        b.set(True, range(i * i, top, i))

sys.stdin = open('input.txt')
cases = int(input())
for x in range(cases):
    t1, t2 = map(int, raw_input().split())
    # Count digit primes x with t1 < x <= t2 by bisecting the sorted list.
    print bisect.bisect(digitPrimes, t2) - bisect.bisect(digitPrimes, t1)
from bitstring import BitArray
import sys

MAXN = 1000000
# bits[x] set -> x is reachable as y + digitsum(y) for some smaller y,
# i.e. x is NOT a self number.
bits = BitArray(MAXN)
for x in range(1, MAXN):
    if not bits[x]:
        # x has no generator, so it is a self number: print it, then walk
        # and mark the whole chain x -> x + digitsum(x) -> ... below MAXN
        # so later members of the chain are never printed.
        print x
        while True:
            next = x + sum(ord(c) - ord('0') for c in str(x))
            if next < MAXN:
                bits.set(True, next)
                x = next
            else:
                break
from bitstring import BitArray
import bisect
import sys

top = 2000000
# Sieve of Eratosthenes: a set bit marks a composite; b[n] clear -> prime.
# NOTE(review): b[0] and b[1] stay clear, so 0 and 1 would be reported as
# prime -- confirm the input never contains them.
b = BitArray(top)
for i in range(2, top):
    if not b[i]:
        b.set(True, range(i * i, top, i))

sys.stdin = open('input.txt')
while True:
    try:
        n = int(input())
    except:
        break
    # Reverse the decimal digits of n.
    # NOTE(review): if the reversal exceeds top (e.g. n = 1999999 ->
    # 9999991) the index below raises IndexError -- confirm input bounds.
    rev = int(str(n)[::-1])
    if not b[n] and not b[rev]:
        # prime whose digit reversal is also prime
        print '%d is emirp.' % n
    elif not b[n]:
        print '%d is prime.' % n
    else:
        print '%d is not prime.' % n
def sample_sequences(positions, buildname, basedir, options): """ """ rpt_err = options.rpt_err gc_err = options.gc_err max_trys = options.max_trys norpt = options.norpt nogc = options.nogc chrnames = sorted(set(map(lambda p: p[0], positions))) profiles = make_profile(positions, buildname, basedir) excluded = [] if options.skipfile: excluded = read_bed_file(options.skipfile, chr) f = open(options.output,"w") for chrom in chrnames: print chrom idxf_na = os.path.join(basedir, '.'.join([buildname, chrom, 'na', 'out'])) idxf_gc = os.path.join(basedir, '.'.join([buildname, chrom, 'gc', 'out'])) idxf_rpt = os.path.join(basedir, '.'.join([buildname, chrom, 'rpt', 'out'])) bits_gc = Bits(filename=idxf_gc) bits_rpt = Bits(filename=idxf_rpt) #this bit array is used to mark positions that are excluded from sampling #this will be updated as we sample more sequences in order to prevent sampled sequences from overlapping bits_na = BitArray(filename=idxf_na) #mark excluded regions for pos in excluded: if pos[0] != chrom: continue bits_na.set(True, range(pos[1], pos[2])) npos+=1 npos = 0 #mark positive regions for pos in positions: if pos[0] != chrom: continue bits_na.set(True, range(pos[1], pos[2])) npos+=1 if options.count == 0: count = options.fold*npos sampled_cnt = 0 while sampled_cnt < count: sampled_prof = random.choice(profiles) sampled_len = sampled_prof[1] sampled_gc = sampled_prof[2] sampled_rpt = sampled_prof[3] rpt_err_allowed = int(rpt_err*sampled_len) gc_err_allowed = int(gc_err*sampled_len) trys = 0 while trys < max_trys: trys += 1 pos = random.randint(1, bits_na.length - sampled_len) pos_e = pos+sampled_len #if bits_na.any(True, range(pos, pos_e)): # continue if bits_na[pos:pos_e].count(True) > 0: continue if not norpt: pos_rpt = bits_rpt[pos:pos_e].count(True) if abs(sampled_rpt - pos_rpt) > rpt_err_allowed: continue if not nogc: pos_gc = bits_gc[pos:pos_e].count(True) if abs(sampled_gc - pos_gc) > gc_err_allowed: continue #accept the sampled position #mark the 
sampled regions bits_na.set(True, range(pos, pos_e)) f.write('\t'.join([chrom, str(pos), str(pos_e)]) + '\n') sampled_cnt += 1 print trys, chrom, pos, pos_e, sampled_len, pos_rpt, sampled_rpt, pos_gc, sampled_gc break else: print "maximum trys reached" f.close()
MAXN = 1000000005 isHappy = BitArray(MAXN) unHappy = BitArray(MAXN) isHappy[1] = True for case in range(1, 1 + int(input())): try: n = int(input()) except: break path = set() curr = n flag = True while not isHappy[curr] and not unHappy[curr]: if isHappy[curr]: break if unHappy[curr]: flag = False break path.add(curr) curr = sum(map(lambda x: x**2, map(int, str(curr)))) if curr in path: flag = False break if flag: isHappy.set(True, path) print 'Case #%d: %d is a Happy number.' % (case, n) else: unHappy.set(True, path) print 'Case #%d: %d is an Unhappy number.' % (case, n)
return hash(seqid) & 0x7FFFFFFF if __name__ == '__main__': parser = argparse.ArgumentParser( description="Pair fastq files, writing all the pairs to separate files and the unmapped reads to separate files") parser.add_argument('-l', help='Pair #1 reads file', required=True) parser.add_argument('-r', help='Pair #2 reads file', required=True) parser.add_argument('-n', help='Number of sequences in the file. This is used to determine the size of the bloom filter. The default is 500,000,000', type=int, default=500000000) args = parser.parse_args() # define the bloom filter to be of size N. This should be at least 1.3x the number of reads. sys.stderr.write("Initializing the bloom filter\n") lbf = BitArray(args.n) lbf.set(0) sys.stderr.write("Completed initializing the bloom filter\n") counter = 0 # read and populate the bloom filter if args.l.endswith('.gz'): qin = gzip.open(args.l, 'rb') else: qin = open(args.l, 'r') while True: l = qin.readline() if not l: break counter += 1
def layer3(Message, type, ServiceType=BitArray('0x0110', length=4)):
    """Build layer-3 packets from *Message* and hand each to layer2().

    Message     -- payload bits (BitArray-like, indexable per bit)
    type        -- message class selector: '0xA' long message, '0xB'
                   block message, '0x8' service message
    ServiceType -- 4-bit service type field, used only for '0x8'

    Uses and updates the module-level counters L3_SC (sequence counter)
    and L3_SUP.  NOTE(review): the fragment counts use '/' division;
    under Python 3 that yields a float and BitArray(length=...) would
    fail -- presumably this targets Python 2, confirm.
    """
    global L3_SC
    global L3_SUP
    crc = Crc()
    if '0xA' == type:
        # Long message: split into 160-bit fragments, each prefixed by a
        # 16-bit header (LCh, DI, LF last-fragment flag, SC counter, CRC6).
        L3_Fragment = (len(Message) / 160) + 1
        data = BitArray(length=L3_Fragment * 160)
        data.set(0)
        for pos in range(len(Message)):
            data[pos] = Message[pos]   # copy payload; tail stays zero-padded
        i = 0
        while i < L3_Fragment:
            L3Hdr_LCh = BitArray('0b1010', length=4)   # long-message channel
            L3Hdr_DI = BitArray('0b0', length=1)       # no decode indicator
            if i < L3_Fragment - 1:
                L3Hdr_LF = BitArray('0b0', length=1)   # not the last fragment
            else:
                L3Hdr_LF = BitArray('0b1', length=1)   # last fragment
            L3Hdr_SC = BitArray(uint=L3_SC % 16, length=4)  # sequence counter
            # Header fields are transmitted LSB-first, hence the flips.
            L3Hdr_LCh = L3Hdr_LCh[::-1]
            L3Hdr_SC = L3Hdr_SC[::-1]
            L3Hdr_PreCRC = L3Hdr_LCh + L3Hdr_DI + L3Hdr_LF + L3Hdr_SC
            L3Hdr_CRC = crc.crc6(L3Hdr_PreCRC)
            L3Hdr = L3Hdr_LCh + L3Hdr_DI + L3Hdr_LF + L3Hdr_SC + L3Hdr_CRC
            dataFragment = data[0 + (160 * i):160 + (160 * i)]
            # Marshal the body: reverse the bit order inside every byte
            # (LSB-first on the wire).
            for pos in range(len(dataFragment)):
                if not (pos % 8):
                    word = dataFragment[pos:pos + 8]
                    word = word[::-1]
                    dataFragment[pos:pos + 8] = word
            L3Packet = L3Hdr + dataFragment
            i += 1
            layer2(L3Packet)
            L3_SC += 1
    if '0xB' == type:
        # Block message: a single 168-bit body with an 8-bit header.
        L3Hdr_LCh = BitArray('0b1011', length=4)   # block-message channel
        L3Hdr_DI = BitArray('0b0', length=1)       # no decode indicator
        L3Hdr_SCh = BitArray('0b100', length=3)    # sub-channel field
        L3Hdr_LCh = L3Hdr_LCh[::-1]   # LSB-first
        L3Hdr_SCh = L3Hdr_SCh[::-1]
        L3Hdr = L3Hdr_LCh + L3Hdr_DI + L3Hdr_SCh
        data = BitArray(length=168)
        data.set(0)
        for pos in range(len(Message)):
            data[pos] = Message[pos]   # copy payload; tail stays zero-padded
        dataFragment = data[0:168]
        # Marshal the body: reverse the bit order inside every byte.
        for pos in range(len(dataFragment)):
            if not (pos % 8):
                word = dataFragment[pos:pos + 8]
                word = word[::-1]
                dataFragment[pos:pos + 8] = word
        L3Packet = L3Hdr + dataFragment
        layer2(L3Packet)
        L3_SC += 1
    if '0x8' == type:
        # Service message: 152-bit fragments with a 16-bit header carrying
        # country (CID), service type, network (NID) and block number.
        L3_Fragment = (len(Message) / 152) + 1
        data = BitArray(length=L3_Fragment * 152)
        data.set(0)
        for pos in range(len(Message)):
            data[pos] = Message[pos]   # copy payload; tail stays zero-padded
        i = 0
        while i < L3_Fragment:
            L3Hdr_LCh = BitArray('0b1000', length=4)   # service-message channel
            L3Hdr_LCh = L3Hdr_LCh[::-1]
            L3Hdr_RFA = BitArray('0b0', length=1)      # reserved for future use
            if i < L3_Fragment - 1:
                L3Hdr_LF = BitArray('0b0', length=1)   # not the last fragment
            else:
                L3Hdr_LF = BitArray('0b1', length=1)   # last fragment
            L3Hdr_DUP = BitArray(uint=L3_SUP % 4, length=2)   # duplicate counter
            L3Hdr_CID = BitArray('0xD', length=4)      # country id (Germany)
            L3Hdr_CID = L3Hdr_CID[::-1]
            L3Hdr_Type = ServiceType
            L3Hdr_Type = L3Hdr_Type[::-1]
            L3Hdr_NID = BitArray('0xC', length=4)      # network id (MVG)
            L3Hdr_NID = L3Hdr_NID[::-1]
            L3Hdr_BLN = BitArray(uint=(L3_Fragment - 1) % 16, length=4)   # block number
            L3Hdr_BLN = L3Hdr_BLN[::-1]
            L3Hdr = L3Hdr_LCh + L3Hdr_RFA + L3Hdr_LF + L3Hdr_DUP + L3Hdr_CID + L3Hdr_Type + L3Hdr_NID + L3Hdr_BLN
            dataFragment = data[0 + (152 * i):152 + (152 * i)]
            # Marshal the body: reverse the bit order inside every byte.
            for pos in range(len(dataFragment)):
                if not (pos % 8):
                    word = dataFragment[pos:pos + 8]
                    word = word[::-1]
                    dataFragment[pos:pos + 8] = word
            L3Packet = L3Hdr + dataFragment
            i += 1
            layer2(L3Packet)
            L3_SC += 1
def layer2(Message):
    """Frame one layer-3 packet into the layer-2 transport frame.

    Each call stores one block (16-bit BIC + message + CRC14 + 82-bit
    parity) into the module-level L2Frame list, indexed by the sequence
    counter L3_SC.  When block 189 arrives the frame is finalized: 82
    vertical-parity blocks are appended, per-column CRC82s are filled in,
    the payload area of the blocks is XOR-scrambled, and the frame is
    appended byte-by-byte to the file 'frame_output'.

    NOTE(review): indentation reconstructed from a single-line source --
    the finalization code is assumed to sit inside the ``L3_SC == 189``
    branch; confirm against the original file.
    """
    global L3_SC
    global L2Frame
    # Fixed 272-bit scrambling sequence XORed over each block's payload.
    scramble_table = BitArray('0b10101111101010101000000101001010111100101110111000000111001110100100111101011101010001001000011001110000101111011011001101000011101111000011111111100000111101111100010111001100100000100101001110110100011110011111001101100010101001000111000110110101011100010011000100010000')
    crc = Crc()
    if L3_SC == 0:
        # First block of a new frame -- start from an empty list.
        L2Frame = []
        print("Create Frame: " + str(FrameCnt) + "...this will take a while")
    if L3_SC < 190:
        # Block Identification Code depends on the position in the frame.
        if L3_SC < 60:
            L2_BIC = BitArray('0xA791', length=16)   # BIC 1
        else:
            if L3_SC >= 60 and L3_SC < 130:
                L2_BIC = BitArray('0x74A6', length=16)   # BIC 2
            else:
                L2_BIC = BitArray('0x135E', length=16)   # BIC 3
        L2_CRC = crc.crc14(Message)
        L2_MSGCRC = Message + L2_CRC
        L2_Parity = crc.crc82(L2_MSGCRC)
        L2Block = L2_BIC + Message + L2_CRC + L2_Parity
        L2Frame.insert(L3_SC, L2Block)
        if L3_SC == 189:
            # Frame complete: append 82 vertical-parity blocks
            # (BIC 0xc875 + 272 zero bits each) ...
            k = 0
            while k < 82:
                L2Frame.append(BitArray('0xc875', length=16))
                L2Frame[190 + k].append(BitArray(length=272))
                k += 1
            # ... compute CRC82 over each of the 272 bit columns of the
            # 190 data blocks (skipping the 16-bit BIC) and write it down
            # the corresponding column of the parity blocks ...
            i = 0
            j = 0
            while i < 272:
                VerticalBlock = BitArray(length=190)
                while j < 190:
                    Block = L2Frame[j]
                    VerticalBlock.set(Block[i + 16], j)
                    j += 1
                j = 0
                VerticalCRC = crc.crc82(VerticalBlock)
                k = 0
                while k < 82:
                    L2Frame[190 + k].set(VerticalCRC[k], i + 16)
                    k += 1
                i += 1
            l = 0
            # ... XOR-scramble the 272-bit payload of every block, leaving
            # the 16-bit BIC untouched ...
            i = 0
            while i < 272:
                Scramble = L2Frame[i][16:288] ^ scramble_table
                L2Frame[i] = L2Frame[i][0:16] + Scramble
                i += 1
            # ... and append the whole frame, one byte per character, to
            # the output file.
            fh = open('frame_output', 'a')
            msg = ''
            while l < len(L2Frame):
                m = 0
                while m < len(L2Frame[l]):
                    if (m % 8 == 0):
                        msg += chr(L2Frame[l][m:m + 8].uint)
                    m += 1
                l += 1
            fh.write(msg)
            fh.close()
class BloomFilter(object):
    '''
    Bloom filter backed by a BitArray, using seeded SHA-256 digests
    (``MULTISHA256``) as the hash family.
    '''

    def __init__(self, items_count, fp_prob):
        '''
        items_count : int
            Number of items expected to be stored in the bloom filter.
        fp_prob : float
            Desired false-positive probability, as a decimal.
        '''
        self.fp_prob = fp_prob
        # Bit-array size derived from the standard Bloom formula.
        self.size = self.get_size(items_count, fp_prob)
        # Number of hash rounds that minimizes the false-positive rate.
        self.hash_count = self.get_hash_count(self.size, items_count)
        self.bit_array = BitArray(self.size)
        self.bit_array.set(0)   # start with every bit clear

    def add(self, item):
        '''Add an item to the filter.'''
        for i in range(self.hash_count):
            # i seeds MULTISHA256, so every round yields an independent
            # digest.  (Fix: the original hashed each item twice per round
            # and accumulated the digests in a list it never used.)
            digest = MULTISHA256(item, i) % self.size
            self.bit_array[digest] = True

    def check(self, item):
        '''Return False if *item* is definitely absent, True if it is
        probably present (false positives are possible, negatives are
        exact).'''
        for i in range(self.hash_count):
            digest = MULTISHA256(item, i) % self.size
            if self.bit_array[digest] == False:
                return False
        return True

    @classmethod
    def get_size(cls, n, p):
        '''
        Bit-array size m for n expected items and false-positive
        probability p:  m = -(n * ln(p)) / (ln(2)^2)
        '''
        m = -(n * math.log(p)) / (math.log(2)**2)
        return int(m)

    @classmethod
    def get_hash_count(cls, m, n):
        '''
        Optimal number of hash functions k for bit-array size m and n
        expected items:  k = (m / n) * ln(2)
        '''
        k = (m / n) * math.log(2)
        return int(k)
class Client:
    """URTorrent-style peer client: announces to a tracker on localhost:6969,
    seeds the file if it is present locally, otherwise leeches pieces from
    peers and reassembles the file.

    NOTE(review): indentation reconstructed from a single-line source;
    several evident defects are flagged inline rather than fixed.
    """

    def __init__(self, ip_addr, port, filename):
        # Ensure the tracker is told we stopped and temp files are removed.
        atexit.register(self.exit_handler)
        # Peers (and connection info) that this client is connected to.
        self.connection_list = list()
        self.listen_list = list()
        self.metainfo = Metainfo(filename)
        self.filename = filename
        self.ip_addr = ip_addr
        self.port = port
        self.peer_id = os.urandom(20)
        self.info_hash = self.metainfo.info_hash
        self.uploaded = 0
        self.downloaded = 0
        self.tracker_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.tracker_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.tracker_socket.connect((socket.gethostbyname('localhost'), 6969))
        # If the client already has the file: full bitfield, nothing left.
        self.bitfield = BitArray(self.metainfo.num_pieces)
        if (self.check_for_file()):
            self.bitfield.set(True)
            self.left = 0
        else:
            self.bitfield.set(False)
            self.left = self.metainfo.file_length
        self.send_GET_request(0)   # announce "started"
        self.listening_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.listening_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.listening_socket.bind(
            (socket.gethostbyname(self.ip_addr), self.port))
        self.listening_socket.listen(5)
        tracker_response = self.tracker_socket.recv(1024)
        print("Response: ", tracker_response)
        # Crude parse of the HTTP status line from the repr of the bytes.
        status = re.split(" ", str(tracker_response)[2:-1])
        if (status[1] != "200"):
            print("ERROR")
        else:
            print("parsing tracker response...")
            self.response = tracker_response
            self.handle_tracker_response()
        listening_thread = threading.Thread(target=self.listen_for_handshake)
        listening_thread.daemon = True
        listening_thread.start()

    def leech(self):
        """Request missing pieces from connected peers until the bitfield
        is full, then reassemble the file."""
        while True:
            time.sleep(.2)
            if self.bitfield.all(True):
                print(self.bitfield.all(True))
                break
            # Determine which piece to look for.
            index = self.next_piece()
            # Ask the first connected peer that has the piece.
            for connection in self.connection_list:
                if connection.has_piece(index):
                    print("Requesting piece ", str(index), " from ",
                          connection.peer_port)
                    self.request_piece(index, connection)
                    break
        print("done")
        self.reassemble_file()
        return

    def send_have_message(self, index):
        """Broadcast a 'have <index>' message to every known listener.

        NOTE(review): this method cannot work as written -- ``listen_list``
        lacks ``self.``, ``socket = socket.socket(...)`` shadows the module
        (UnboundLocalError), and ``have_message[4]`` is assigned 4 packed
        bytes where a single int is required.  Confirm it is unused.
        """
        have_message = bytearray(9)
        have_message[0:4] = struct.pack('>i', int(5))
        have_message[4] = struct.pack('>i', int(4))
        have_message[5:9] = struct.pack('>i', int(index))
        for listener in listen_list:
            print("Sending have message ", have_message, "to ", listener)
            socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            socket.connect(listener)
            socket.send(have_message)
            socket.close()
        return

    def check_for_file(self):
        """Return True and split the file into temp-<i><name> piece files
        if it exists locally (seeder); otherwise start the leech thread
        and return False."""
        if os.path.exists(self.filename):
            print("FILE EXISTS")
            # Split the file into per-piece temporary chunks.
            f = open(self.filename, "rb")
            for i in range(self.metainfo.num_pieces - 1):
                temp_filename = "temp-" + str(i) + self.filename
                fout = open(temp_filename, 'wb')
                b = bytearray()
                b = f.read(self.metainfo.piece_length)
                if (b):
                    fout.write(b)
                # Hash computed but only for (removed) debug output.
                piece_hash = hashlib.sha1()
                piece_hash.update(b)
            # Last (possibly short) piece is copied byte-by-byte.
            temp_filename = "temp-" + str(self.metainfo.num_pieces -
                                          1) + self.filename
            fout = open(temp_filename, 'w+b')
            while True:
                b = f.read(1)
                if (b):
                    fout.write(b)
                else:
                    break
            return True
        else:
            print("FILE DOESN'T EXIST")
            requesting_thread = threading.Thread(target=self.leech)
            requesting_thread.daemon = True
            requesting_thread.start()
            return False

    def send_GET_request(self, event):
        """Send the announce GET to the tracker.

        event -- 0 started, 1 completed, 2 stopped, other: plain announce.
        NOTE(review): "&downloaded" and "&left" are missing the '=' sign,
        so those values are concatenated into the parameter name.
        """
        get_request = bytearray(map(ord, "GET /announce?info_hash="))
        get_request.extend(
            map(ord, urllib.parse.quote_plus(self.metainfo.info_hash.digest())))
        get_request.extend(map(ord, "&peer_id="))
        get_request.extend(map(ord, urllib.parse.quote_plus(self.peer_id)))
        get_request.extend(map(ord, "&port="))
        get_request.extend(bytes(str(self.port), "ascii"))
        get_request.extend(map(ord, "&uploaded="))
        get_request.extend(bytes(str(self.uploaded), "ascii"))
        get_request.extend(map(ord, "&downloaded"))
        get_request.extend(bytes(str(self.downloaded), "ascii"))
        get_request.extend(map(ord, "&left"))
        get_request.extend(bytes(str(self.left), "ascii"))
        get_request.extend(map(ord, "&compact=1"))
        if event == 0:
            get_request.extend(map(ord, "&event=started HTTP/1.1\r\n\r\n"))
        elif event == 1:
            get_request.extend(map(ord, "&event=completed HTTP 1.1\r\n\r\n"))
        elif event == 2:
            get_request.extend(map(ord, "&event=stopped HTTP/1.1\r\n\r\n"))
        else:
            get_request.extend(map(ord, " HTTP/1.1\r\n\r\n"))
        print("GET request: ", get_request)
        self.tracker_socket.send(get_request)
        return get_request

    def handle_tracker_response(self):
        """Parse self.response (bencoded body after the HTTP headers),
        store the stats, decode the compact peer list and connect to every
        peer we are not already connected to."""
        print("Raw Response: ", self.response)
        split_decoded = self.response.split(b'\r\n\r\n')
        response_dict = decode(split_decoded[1])
        print("Tracker response: ", response_dict)
        if b'failure reason' in response_dict:
            print(response_dict[b'failure reason'].decode('utf-8'))
        else:
            self.interval = response_dict[b'interval']
            self.complete = response_dict[b'complete']
            self.incomplete = response_dict[b'incomplete']
            # Compact format: 6 bytes per peer (4 IP + 2 big-endian port).
            peerlist = list()
            unparsed_peers = response_dict[b'peers']
            print(unparsed_peers)
            for x in range(len(unparsed_peers) // 6):
                ip = socket.inet_ntoa(unparsed_peers[x * 6:x * 6 + 4])
                port = int.from_bytes(unparsed_peers[x * 6 + 4:x * 6 + 6],
                                      byteorder='big')
                peerlist.append((ip, port))
            print(peerlist)
            self.peer_list = peerlist
            # Handshake every listed peer that is not us and not already
            # connected (for-else fires when no existing connection matched).
            for (IP, port) in self.peer_list:
                if (IP != self.ip_addr) or (port != self.port):
                    for connection in self.connection_list:
                        if (IP == connection.peer_ip_addr) and (
                                port == connection.peer_port):
                            break
                    else:
                        print("Connect to peer ", IP, port)
                        handshake_thread = threading.Thread(
                            target=self.initiate_handshaking, args=(IP, port))
                        handshake_thread.daemon = True
                        handshake_thread.start()
                        handshake_thread.join()

    def initiate_handshaking(self, IP, port):
        """Open a connection to (IP, port), send our handshake and keep the
        connection if the peer's reply is a valid matching handshake."""
        handshake_message = self.generate_handshake_msg()
        new_connection = Connection(self, IP, port,
                                    BitArray(self.metainfo.num_pieces))
        new_connection.sock.send(handshake_message)
        print("Handshake initiated ", handshake_message)
        handshake_response = new_connection.sock.recv(1024)
        # Valid: 0x12 + "URTorrent protocol" + 8 reserved zero bytes +
        # our info hash.
        if handshake_response[0] == 18 and handshake_response[
                1:19] == b'URTorrent protocol' and handshake_response[
                    19:
                    27] == b'\x00\x00\x00\x00\x00\x00\x00\x00' and handshake_response[
                        27:47] == self.info_hash.digest():
            new_connection.peer_id = handshake_response[47:68]
            print("Received valid handshake response ", handshake_response)
            self.connection_list.append(new_connection)
            new_connection.start()

    def listen_to_peer(self, peer, address):
        """Service one peer socket: answer piece requests (id 6) from the
        temp piece files and dispatch on the other message ids."""
        while True:
            try:
                message = peer.recv(1024)
                if message:
                    print("received message")
                    # <4-byte length prefix><1-byte message id><payload>
                    message_prefix = struct.unpack('>i', message[0:4])[0]
                    print("Message Prefix = ", message_prefix)
                    message_id = message[4]
                    print("Message ID = ", message_id)
                    if message_id == 6:
                        # Request message: serve the piece from its temp file.
                        print("Request message received")
                        index = message[5:9]
                        begin = message[9:13]
                        length = message[13:17]
                        length_prefix = 9 + 65536
                        piece_payload = bytearray(b'\x07')
                        piece_payload.extend(index)
                        piece_payload.extend(begin)
                        print("Sending piece Message: ", piece_payload)
                        int_index = struct.unpack('>i', index)[0]
                        filename = "temp-" + str(
                            int_index) + self.metainfo.filename
                        f = open(filename, 'rb')
                        b = bytearray()
                        if int_index < self.metainfo.num_pieces - 1:
                            b = f.read(65536)
                        else:
                            # Final piece is shorter: file length mod piece size.
                            b = f.read(self.metainfo.file_length %
                                       self.metainfo.piece_length)
                        piece_payload.extend(b)
                        piece_message = bytearray(
                            struct.pack('>i', len(piece_payload)))
                        piece_message.extend(piece_payload)
                        peer.send(piece_message)
                        self.uploaded += 1
                    elif message_id == 8:
                        # Cancel message -- acknowledged but not acted upon.
                        print("Cancel Message")
                    elif message_id == 4:
                        # Have message -- NOTE(review): bitfield not updated.
                        print("Have Message")
                    elif message_id == 5:
                        # Bitfield message: only the length is validated.
                        if (len(self.bitfield.bin) == len(message[5:])):
                            print("Received bitfield of ", message[5:])
                    elif message_id == 7:
                        # Piece message -- NOTE(review): payload is parsed
                        # but never saved here.
                        index = message[5:9]
                        begin = message[9:13]
                        piece = message[13:]
                    else:
                        print("Invalid Message")
                    time.sleep(.1)
                else:
                    print("No data received")
                    time.sleep(1)
            except Exception as e:
                print(e)
                return False

    def next_piece(self):
        """Pick a piece index we do not have yet.

        NOTE(review): if the first random index is already held this
        returns 0 instead of retrying -- confirm intended behavior.
        """
        while True:
            j = randint(0, self.metainfo.num_pieces - 1)
            if self.bitfield.bin[j] == '0':
                return j
            else:
                return 0

    def request_piece(self, index, peer_connection):
        """Send a 17-byte request message for *index* to the peer.

        NOTE(review): the last-piece size is computed modulo num_pieces,
        which looks like it should be modulo piece_length.
        """
        piece_request = bytearray(18)
        piece_request[0:4] = struct.pack('>i', int(13))
        piece_request[4] = 6
        piece_request[5:9] = struct.pack('>i', int(index))
        piece_request[9:13] = struct.pack('>i', int(0))
        if index < self.metainfo.num_pieces - 1:
            piece_request[13:17] = struct.pack('>i',
                                               int(self.metainfo.piece_length))
        else:
            print("Last Piece")
            file_size = self.metainfo.file_length
            last_piece_size = file_size % self.metainfo.num_pieces
            piece_request[13:17] = struct.pack('>i', int(last_piece_size))
        print("Sending piece request: ", piece_request)
        peer_connection.sock.send(piece_request)
        return 0

    def generate_handshake_msg(self):
        """Build the handshake: 0x12 + "URTorrent protocol" + 8 reserved
        zero bytes + info hash + our peer id."""
        handshake = bytearray(b'\x12')
        handshake.extend(map(ord, "URTorrent protocol"))
        handshake.extend(bytearray(8))
        handshake.extend(self.metainfo.info_hash.digest())
        handshake.extend(self.peer_id)
        return handshake

    def reassemble_file(self):
        """Concatenate the temp-<i><name> piece files back into the target
        file, reading the final short piece byte-by-byte."""
        print("Reassembling File...")
        f = open(self.filename, 'ab')
        for i in range(self.metainfo.num_pieces):
            piece_filename = "temp-" + str(i) + self.filename
            piece = open(piece_filename, 'rb')
            b = bytearray()
            b = piece.read(65536)
            if not b:
                while True:
                    k = piece.read(1)
                    if k:
                        b.extend(k)
                    else:
                        break
            f.write(b)

    def exit_handler(self):
        """atexit hook: announce 'stopped', close sockets, delete the
        temporary piece files."""
        self.send_GET_request(2)
        print("Client closing")
        self.tracker_socket.close()
        self.listening_socket.close()
        print("Cleaning up temporary files")
        for fl in os.listdir():
            if "temp" in fl:
                os.remove(fl)
        print("Quitting...")

    def listen_for_handshake(self):
        """Accept inbound peer connections; on a valid handshake, reply
        with our handshake and bitfield, then hand the socket off to a
        listen_to_peer thread."""
        i = 0
        while True:
            i = i + 1
            try:
                peer_connection, address = self.listening_socket.accept()
                print(i)
                buf = peer_connection.recv(1024)
                # Same validity check as in initiate_handshaking.
                if buf[0] == 18 and buf[1:19] == b'URTorrent protocol' and buf[
                        19:27] == b'\x00\x00\x00\x00\x00\x00\x00\x00' and buf[
                            27:47] == self.info_hash.digest():
                    print("Received valid handshake", buf)
                    new_listener = (peer_connection.getsockname()[0], address)
                    self.listen_list.append(new_listener)
                    peer_connection.send(self.generate_handshake_msg())
                    time.sleep(.1)
                    # Send our bitfield (message id 5).
                    # NOTE(review): the buffer is allocated 4 bytes short of
                    # prefix+id+payload and self.bitfield is a BitArray, not
                    # bytes -- confirm this message is well-formed.
                    bitfield_message = bytearray(1 + self.metainfo.num_pieces)
                    bitfield_message[0:4] = struct.pack(
                        '>i', int(1 + self.metainfo.num_pieces))
                    bitfield_message[4] = 5
                    bitfield_message[5:] = self.bitfield
                    print("Sending bitfield message: ", bitfield_message)
                    peer_connection.send(bitfield_message)
                    # Split off a thread to service piece requests.
                    peer_connection.settimeout(120)
                    listen_thread = threading.Thread(
                        target=self.listen_to_peer,
                        args=(peer_connection, address))
                    listen_thread.daemon = True
                    listen_thread.start()
            except Exception as exc:
                print(str(exc))
                peer_connection.close()
                break
            except KeyboardInterrupt:
                print("Closing")
                peer_connection.close()
                break
class LifObject(base.ConfigObjectBase):
    """Config object modeling a LIF (logical interface): queue types/queues,
    optional RDMA and NVMe resources, RSS settings, and the HAL request /
    response plumbing to configure it."""

    def __init__(self):
        super().__init__()
        # Start from the stored 'LIF' template.
        self.Clone(Store.templates.Get('LIF'))
        return

    def Init(self, tenant, spec, namespace=None):
        """Populate the LIF from `spec`: ids, optional C library hook,
        RDMA/NVMe resources, queue types, RSS, and QoS classes."""
        # LIF id comes from the namespace if given, else the global allocator.
        if namespace:
            self.id = namespace.get()
        else:
            self.id = resmgr.LifIdAllocator.get()
        self.GID("Lif%d" % self.id)
        self.status = haldefs.interface.IF_STATUS_UP
        self.hw_lif_id = -1  # filled in from the HAL response, only once
        self.qstate_base = {}  # qstate address per queue type, from HAL response
        self.promiscuous = False
        self.allmulticast = False
        self.pds = []  # PDs registered against this LIF (RDMA only)
        # Optional per-LIF C config library: resolve "<name>_config" symbol
        # taking a QInfoStruct pointer.
        self.c_lib_name = getattr(spec, 'c_lib', None)
        if self.c_lib_name:
            self.c_lib = clibs.LoadCLib(self.c_lib_name)
            if self.c_lib:
                self.c_lib_config = self.c_lib[self.c_lib_name + '_config']
                self.c_lib_config.argtypes = [ctypes.POINTER(QInfoStruct)]
                self.c_lib_config.restype = None
        else:
            self.c_lib = None
        # RDMA sizing, or zeroed fields when RDMA is disabled.
        if hasattr(spec, 'rdma') and spec.rdma.enable:
            self.enable_rdma = spec.rdma.enable
            self.rdma_max_pt_entries = spec.rdma.max_pt_entries
            self.rdma_max_keys = spec.rdma.max_keys
            self.rdma_max_ahs = spec.rdma.max_ahs
            self.hostmem_pg_size = spec.rdma.hostmem_pg_size
            self.hbm_barmap_entries = (int(spec.rdma.hbm_barmap_size /
                                           spec.rdma.hostmem_pg_size))
            # Occupancy bitmaps for PT-table and HBM barmap page allocation.
            self.rdma_tbl_pos = BitArray(length=self.rdma_max_pt_entries)
            self.hbm_tbl_pos = BitArray(length=self.hbm_barmap_entries)
            self.rdma_async_eq_id = 0
            self.rdma_admin_aq_id = 1
            self.rdma_admin_cq_id = 0
        else:
            self.enable_rdma = False
            self.rdma_max_pt_entries = 0
            self.rdma_max_keys = 0
            self.rdma_max_ahs = 0
            self.hbm_barmap_entries = 0
            self.hostmem_pg_size = 0
        # Base addresses are learned from the HAL response (ProcessHALResponse).
        self.rdma_pt_base_addr = 0
        self.rdma_kt_base_addr = 0
        self.rdma_dcqcn_profile_base_addr = 0
        self.rdma_at_base_addr = 0
        if hasattr(spec, 'nvme') and spec.nvme.enable:
            self.enable_nvme = spec.nvme.enable
            self.nvme_lif = nvme_lif.NvmeLifObject(self, spec.nvme)
        else:
            self.enable_nvme = False
        self.vlan_strip_en = False
        self.vlan_insert_en = False
        # Generate queue types (and their queues) from the spec.
        self.queue_types = objects.ObjectDatabase()
        self.obj_helper_q = queue_type.QueueTypeObjectHelper()
        self.obj_helper_q.Generate(self, spec)
        self.queue_types.SetAll(self.obj_helper_q.queue_types)
        self.queue_types_list = self.obj_helper_q.queue_types
        # Flat list of every queue across all queue types.
        self.queue_list = []
        for q_type in self.queue_types_list:
            for queue in q_type.queues.GetAll():
                self.queue_list.append(queue)

        # RDMA per LIF allocators
        if self.enable_rdma:
            # QP 0, 1 are special QPs
            self.qpid_allocator = objects.TemplateFieldObject(
                "range/2/" + str(spec.rdma.max_qp))
            # AQ ID 0 is owned by ETH
            self.aqid_allocator = objects.TemplateFieldObject(
                "range/1/" + str(spec.rdma.max_aq))
            # Reserve CQ 0, 1 for special QPs, AQ
            self.cqid_allocator = objects.TemplateFieldObject(
                "range/2/" + str(spec.rdma.max_cq))
            self.eqid_allocator = objects.TemplateFieldObject(
                "range/0/" + str(spec.rdma.max_eq))
            self.pd_allocator = objects.TemplateFieldObject(
                "range/0/" + str(spec.rdma.max_pd))
            # MR key 0 is reserved (range starts at 1).
            self.mr_key_allocator = objects.TemplateFieldObject(
                "range/1/" + str(spec.rdma.max_mr))
            self.slab_allocator = objects.TemplateFieldObject("range/0/2048")
            self.kslab_allocator = objects.TemplateFieldObject("range/0/2048")
            # Generate RDMA LIF owned resources
            self.slabs = objects.ObjectDatabase()
            # Generate KernelSlab of 4KB and 10 MB for FRPMR
            self.kslab_4KB = slab.SlabObject(self, 4096, True)
            self.obj_helper_slab = slab.SlabObjectHelper()
            #slab_spec = spec.rdma.slab.Get(Store)
            #self.obj_helper_slab.Generate(self, slab_spec)
            #self.slabs.SetAll(self.obj_helper_slab.slabs)

            # Create EQs for RDMA LIF
            self.eqs = objects.ObjectDatabase()
            self.obj_helper_eq = eq.EqObjectHelper()
            self.obj_helper_eq.Generate(self, spec.rdma.max_eq,
                                        spec.rdma.max_eqe)
            if len(self.obj_helper_eq.eqs):
                self.eqs.SetAll(self.obj_helper_eq.eqs)

            # Create CQ 0 for adminQ
            logger.info("Creating 1 Cqs. for LIF:%s" % (self.GID()))
            # Hardcode CQ 0 for AQ
            # Page size is calculated as max_cqe * cqe_size by the CQ for
            # privileged resources
            cq_id = 0
            self.cq = cqs.CqObject(None, cq_id, spec.rdma.max_cqe, 0, True,
                                   self)

            # Create AdminQ
            logger.info("Creating 1 Aqs. for LIF:%s" % (self.GID()))
            aq_id = self.GetAqid()
            self.aq = aqs.AqObject(self, aq_id, spec.rdma.max_aqe,
                                   spec.rdma.hostmem_pg_size)

            self.dcqcn_config_spec = spec.rdma.dcqcn_config.Get(Store)

        # RSS: enable all IPv4/IPv6 TCP/UDP hash types when the spec asks
        # for RSS; otherwise type NONE (key/indirection table set either way).
        if hasattr(spec, 'rss'):
            self.rss_type = (
                haldefs.interface.LifRssType.Value("RSS_TYPE_IPV4") |
                haldefs.interface.LifRssType.Value("RSS_TYPE_IPV4_TCP") |
                haldefs.interface.LifRssType.Value("RSS_TYPE_IPV4_UDP") |
                haldefs.interface.LifRssType.Value("RSS_TYPE_IPV6") |
                haldefs.interface.LifRssType.Value("RSS_TYPE_IPV6_TCP") |
                haldefs.interface.LifRssType.Value("RSS_TYPE_IPV6_UDP"))
            self.rss_key = array.array('B', toeplitz.toeplitz_msft_key)
            self.rss_indir = array.array('B', [0] * 128)
        else:
            self.rss_type = haldefs.interface.LifRssType.Value("RSS_TYPE_NONE")
            self.rss_key = array.array('B', toeplitz.toeplitz_msft_key)
            self.rss_indir = array.array('B', [0] * 128)
        self.tenant = tenant
        self.spec = spec
        # Resolve QoS class names to objects only when QoS is enabled.
        self.tx_qos_class = None
        self.rx_qos_class = None
        if self.tenant.IsQosEnabled():
            self.tx_qos_class = getattr(spec, 'tx_qos_class', None)
            self.rx_qos_class = getattr(spec, 'rx_qos_class', None)
            if self.tx_qos_class:
                self.tx_qos_class = Store.objects.Get(self.tx_qos_class)
            if self.rx_qos_class:
                self.rx_qos_class = Store.objects.Get(self.rx_qos_class)
        self.Show()

    # --- Per-LIF resource-id allocators -----------------------------------
    def GetQpid(self):
        return self.qpid_allocator.get()

    def GetCqid(self):
        return self.cqid_allocator.get()

    def GetEqid(self):
        return self.eqid_allocator.get()

    def GetAqid(self):
        return self.aqid_allocator.get()

    def GetSlabid(self):
        return self.slab_allocator.get()

    def GetKSlabid(self):
        return self.kslab_allocator.get()

    def GetPd(self):
        return self.pd_allocator.get()

    def GetMrKey(self):
        return self.mr_key_allocator.get()

    def GetRdmaTblPos(self, num_pages):
        """Allocate a contiguous, power-of-two-sized (min 8) run of pages in
        the RDMA PT table bitmap; returns the starting position, or None for
        non-positive requests. Asserts if no free run exists."""
        if num_pages <= 0:
            return
        total_pages = int(num_pages)
        # Round up to at least 8 pages, else to the next power of two.
        if total_pages <= 8:
            total_pages = 8
        else:
            total_pages = 1 << (total_pages - 1).bit_length()
        logger.info(
            "- LIF: %s Requested: %d Actual: %d page allocation in RDMA PT Table"
            % (self.GID(), num_pages, total_pages))
        # Search for a run of `total_pages` clear bits and mark it used.
        page_order = BitArray(length=total_pages)
        page_order.set(False)
        tbl_pos = self.rdma_tbl_pos.find(page_order)
        assert (tbl_pos)
        self.rdma_tbl_pos.set(True, range(tbl_pos[0],
                                          tbl_pos[0] + total_pages))
        return tbl_pos[0]

    def GetHbmTblPos(self, num_pages):
        """Same allocation scheme as GetRdmaTblPos, but against the NIC HBM
        barmap bitmap."""
        if num_pages <= 0:
            return
        total_pages = int(num_pages)
        if total_pages <= 8:
            total_pages = 8
        else:
            total_pages = 1 << (total_pages - 1).bit_length()
        logger.info(
            "- LIF: %s Requested: %d Actual: %d page allocation in NIC HBM barmap area"
            % (self.GID(), num_pages, total_pages))
        page_order = BitArray(length=total_pages)
        page_order.set(False)
        tbl_pos = self.hbm_tbl_pos.find(page_order)
        assert (tbl_pos)
        self.hbm_tbl_pos.set(True, range(tbl_pos[0],
                                         tbl_pos[0] + total_pages))
        return tbl_pos[0]

    def GetQt(self, type):
        # Look up a queue-type object by type name/id.
        return self.queue_types.Get(type)

    def GetQ(self, type, qid):
        # Look up a queue by type and qid; None when the type is unknown.
        qt = self.queue_types.Get(type)
        if qt is not None:
            return qt.queues.Get(str(qid))

    def GetQstateAddr(self, type):
        if GlobalOptions.dryrun:
            return 0
        return self.qstate_base[type]

    def GetTxQosCos(self):
        # Default COS 7 when no TX QoS class is configured.
        if self.tx_qos_class:
            return self.tx_qos_class.GetTxQosCos()
        return 7

    def GetTxQosDscp(self):
        # Default DSCP 7 when no TX QoS class is configured.
        if self.tx_qos_class:
            return self.tx_qos_class.GetTxQosDscp()
        return 7

    def ConfigureQueueTypes(self):
        if GlobalOptions.dryrun:
            return 0
        self.obj_helper_q.Configure()

    def ConfigureRdmaLifRes(self):
        """Resolve and configure the RDMA-owned resources (async/admin EQ,
        admin CQ/AQ, slabs, EQs, DCQCN profiles). No-op unless RDMA is
        enabled; asserts if any special queue is missing."""
        if self.enable_rdma:
            # EQID 0 on LIF is used for Async events/errors across all PDs
            self.async_eq = self.GetQ('RDMA_EQ', self.rdma_async_eq_id)
            fail = self.async_eq is None
            self.admin_eq = self.async_eq
            # Get EQID 0, CQID 0 for Admin queue AQ 0
            self.admin_cq = self.GetQ('RDMA_CQ', self.rdma_admin_cq_id)
            fail = fail or self.admin_cq is None
            # AQ position in list is different from AQ qid
            self.adminq = self.GetQ('RDMA_AQ', self.rdma_admin_aq_id)
            fail = fail or self.adminq is None
            if (fail is True):
                assert (0)
            self.obj_helper_slab.Configure()
            self.kslab_4KB.Configure()
            if len(self.obj_helper_eq.eqs):
                self.obj_helper_eq.Configure()
            halapi.ConfigureCqs([self.cq])
            halapi.ConfigureAqs([self.aq])
            logger.info("Configuring DCQCN Configs for LIF:%s" % (self.GID()))
            self.dcqcn_config_helper = dcqcn.RdmaDcqcnProfileObjectHelper()
            self.dcqcn_config_helper.Generate(self, self.dcqcn_config_spec)
            self.dcqcn_config_helper.Configure()
        return 0

    def Show(self):
        logger.info("- LIF : %s" % self.GID())
        logger.info(" - # Queue Types : %d" %
                    len(self.obj_helper_q.queue_types))

    def PrepareHALRequestSpec(self, req_spec):
        """Fill a HAL LIF create/update request from this object's state,
        including per-queue-type and per-queue qstate sub-specs."""
        req_spec.key_or_handle.lif_id = self.id
        req_spec.admin_status = self.status
        req_spec.enable_rdma = self.enable_rdma
        req_spec.rdma_max_keys = self.rdma_max_keys
        req_spec.rdma_max_ahs = self.rdma_max_ahs
        req_spec.rdma_max_pt_entries = self.rdma_max_pt_entries
        req_spec.vlan_strip_en = self.vlan_strip_en
        req_spec.vlan_insert_en = self.vlan_insert_en
        # NOTE(review): hard-coded pinned uplink interface id — confirm.
        req_spec.pinned_uplink_if_key_handle.interface_id = 0x41010001
        if self.tx_qos_class:
            req_spec.tx_qos_class.qos_group = self.tx_qos_class.GroupEnum()
        if self.rx_qos_class:
            req_spec.rx_qos_class.qos_group = self.rx_qos_class.GroupEnum()
        if GlobalOptions.classic:
            req_spec.packet_filter.receive_broadcast = True
        req_spec.packet_filter.receive_promiscuous = self.promiscuous
        req_spec.packet_filter.receive_all_multicast = self.allmulticast
        req_spec.rss.type = self.rss_type
        req_spec.rss.key = bytes(self.rss_key)
        req_spec.rss.indir = bytes(self.rss_indir)
        req_spec.enable_nvme = self.enable_nvme
        if self.enable_nvme:
            req_spec.nvme_max_ns = self.spec.nvme.max_ns
            req_spec.nvme_max_sess = self.spec.nvme.max_sess
            req_spec.nvme_host_page_size = self.spec.nvme.host_page_size
        else:
            req_spec.nvme_max_ns = 0
            req_spec.nvme_max_sess = 0
            req_spec.nvme_host_page_size = 0
        for queue_type in self.queue_types.GetAll():
            qstate_map_spec = req_spec.lif_qstate_map.add()
            queue_type.PrepareHALRequestSpec(qstate_map_spec)
            for queue in queue_type.queues.GetAll():
                qstate_spec = req_spec.lif_qstate.add()
                queue.PrepareHALRequestSpec(qstate_spec)

    def ProcessHALResponse(self, req_spec, resp_spec):
        """Record handles/addresses returned by HAL: LIF handle, hw_lif_id,
        per-queue-type qstate addresses, and (when present) the RDMA base
        addresses."""
        self.hal_handle = resp_spec.status.lif_handle
        if (self.c_lib):
            self.CLibConfig(resp_spec)
        if self.hw_lif_id == -1:
            # HAL does not return hw_lif_id in the UpdateResponse. Set the
            # hw_lif_id only once.
            self.hw_lif_id = resp_spec.status.hw_lif_id
        logger.info("- LIF %s = %s HW_LIF_ID = %s (HDL = 0x%x)" %
                    (self.GID(),
                     haldefs.common.ApiStatus.Name(resp_spec.api_status),
                     self.hw_lif_id, self.hal_handle))
        for qstate in resp_spec.qstate:
            logger.info("- QUEUE_TYPE = %d QSTATE_ADDR = 0x%x" %
                        (qstate.type_num, qstate.addr))
            self.qstate_base[qstate.type_num] = qstate.addr
        if self.enable_rdma:
            if resp_spec.rdma_data_valid:
                self.rdma_pt_base_addr = resp_spec.rdma_data.pt_base_addr
                self.rdma_kt_base_addr = resp_spec.rdma_data.kt_base_addr
                self.rdma_dcqcn_profile_base_addr = \
                    resp_spec.rdma_data.dcqcn_profile_base_addr
                self.rdma_at_base_addr = resp_spec.rdma_data.at_base_addr
                self.hbm_barmap_base = resp_spec.rdma_data.barmap_base_addr
                logger.info(
                    "- RDMA-DATA: LIF %s = HW_LIF_ID = %s %s= 0x%x %s= 0x%x %s= 0x%x %s= 0x%x %s= 0x%x"
                    % (self.GID(), self.hw_lif_id,
                       'PT-Base-Addr', self.rdma_pt_base_addr,
                       'KT-Base-Addr', self.rdma_kt_base_addr,
                       'DCQCN-Prof-Base-Addr',
                       self.rdma_dcqcn_profile_base_addr,
                       'AT-Base-Addr', self.rdma_at_base_addr,
                       'BARMAP-Base-Addr', self.hbm_barmap_base))

    def PrepareHALGetRequestSpec(self, req_spec):
        req_spec.key_or_handle.lif_id = self.id
        return

    def ProcessHALGetResponse(self, req_spec, resp_spec):
        # Keep a deep copy so later HAL calls cannot mutate it under us.
        logger.info(
            "- GET LIF %s = %s" %
            (self.GID(), haldefs.common.ApiStatus.Name(resp_spec.api_status)))
        self.get_resp = copy.deepcopy(resp_spec)
        return

    def Get(self):
        halapi.GetLifs([self])
        return

    def GetStats(self):
        # Dryrun has no HAL to query.
        if GlobalOptions.dryrun:
            return None
        self.Get()
        return self.get_resp.stats

    # NOTE(review): IsFilterMatch is defined twice in this class; this first
    # definition is shadowed by the identical one further down.
    def IsFilterMatch(self, spec):
        return super().IsFilterMatch(spec.filters)

    def CLibConfig(self, resp_spec):
        """Pass the qstate addresses to the loaded C config library as a
        QInfoStruct (array of 8 uint64 addresses indexed by queue type)."""
        qaddrs_type = ctypes.c_uint64 * 8
        qaddrs = qaddrs_type()
        for qstate in resp_spec.qstate:
            qaddrs[int(qstate.type_num)] = qstate.addr
        qinfo = QInfoStruct(GlobalOptions.dryrun,
                            resp_spec.status.hw_lif_id, qaddrs)
        self.c_lib_config(ctypes.byref(qinfo))

    def Update(self):
        halapi.ConfigureLifs([self], update=True)

    # NOTE(review): "Promiscous" spelling kept — callers use these names.
    def SetPromiscous(self):
        logger.info("Setting PROMISCUOUS mode for LIF:%s" % self.GID())
        self.promiscuous = True
        return

    def IsPromiscous(self):
        return self.promiscuous

    def SetAllMulticast(self):
        logger.info("Setting ALL MULTICAST mode for LIF:%s" % self.GID())
        self.allmulticast = True
        return

    def IsAllMulticast(self):
        return self.allmulticast

    def IsFilterMatch(self, spec):
        return super().IsFilterMatch(spec.filters)

    def RegisterPd(self, pd):
        # Track PDs only for RDMA-enabled LIFs.
        if self.enable_rdma:
            logger.info("Registering PD: %s LIF %s" % (pd.GID(), self.GID()))
            self.pds.append(pd)

    def AddSlab(self, slab):
        self.obj_helper_slab.AddSlab(slab)
        self.slabs.Add(slab)
modValue = 1000000 #read each line, split to remove extra chars. for line in file.readlines(): data = line.split() for line in data: #hash with the md5 hashfunction. hash = hashlib.md5(line.lower().encode('utf-8')).digest() #extract parts of the hash and convert to integer inside array index span h1 = int(hash[0:3].encode("hex"),16)%modValue h2 = int(hash[4:7].encode("hex"),16)%modValue h3 = int(hash[8:11].encode("hex"),16)%modValue h4 = int(hash[12:15].encode("hex"),16)%modValue #set the bits in the array. use the set function for speed. hashTable.set(True,h1) hashTable.set(True,h2) hashTable.set(True,h3) hashTable.set(True,h4) file.close() #open file as binary file and write to it. with open('data', 'wb') as outfile: hashTable.tofile(outfile) #print execution time. ~20 seconds on my system end = time.time() print(end-start)