def encode(data, nsym=DEFAULT_NSYM):
    """Yield Reed-Solomon-encoded blocks covering all of *data*.

    Each yielded block is rs_encode_msg applied to a one-byte payload-size
    prefix followed by the (zero-padded) payload chunk, with ``nsym`` parity
    symbols.  A final end-of-stream marker block is yielded after the data.
    """
    payload_size = BLOCK_SIZE - nsym - 1
    for _, piece in common.iterate(data, payload_size, bytearray, truncate=False):
        used = len(piece)
        if used < payload_size:
            # Zero-pad short (final) chunks up to the fixed payload size.
            piece.extend(bytes(payload_size - used))
        yield rs_encode_msg(bytearray([used]) + piece, nsym)
    # Terminator block so the decoder knows the stream has ended.
    yield rs_encode_msg(end_of_stream(payload_size), nsym)
def main():
    """Read data/input.txt, RS-encode it in chunks, inject errors, decode,
    and append per-run encode/decode timings to CSV files.

    Relies on module-level names: split_str, error_inject, rs, n, t, runs.
    Prints "True" if the decoded message matches the original, else "False".
    """
    # Read message from file and encode.
    # Fixed: the original leaked this file handle (open().read() with no close).
    with open('data/input.txt', 'r') as infile:
        msg = infile.read().encode()
    msgs = split_str(msg, n - 2 * t)
    # Generate table and generator polynomial
    # prim = rs.find_prime_polys(c_exp=m, fast_primes=True, single=True)
    # rs.init_tables(c_exp=m, prim=prim)
    rs.init_tables(0x14d)
    gen = rs.rs_generator_poly_all(n)
    # Open timer files in append mode; 'with' guarantees they are closed
    # (the original leaked both handles).
    with open('data/encode_time_python.csv', 'a') as encode_time, \
            open('data/decode_time_python.csv', 'a') as decode_time:
        for _ in range(runs):
            # Encode msg (renamed loop var: `m` shadowed the c_exp `m`
            # referenced by the commented-out init code above).
            msgeccs = []
            t1 = time.perf_counter()
            for chunk in msgs:
                msgeccs.append(rs.rs_encode_msg(chunk, 2 * t, gen=gen[2 * t]))
            t2 = time.perf_counter()
            encode_time.write(str(t2 - t1) + '\n')
            # Error injection
            for msgecc in msgeccs:
                error_inject(msgecc)
            # Decode
            corrections = []
            t1 = time.perf_counter()
            for msgecc in msgeccs:
                corrections.append(rs.rs_correct_msg(msgecc, 2 * t))
            t2 = time.perf_counter()
            decode_time.write(str(t2 - t1) + '\n')
            rmsg = b''.join(bytes(c[0]) for c in corrections)
            # Check result: compare bytes directly — decoding a corrupted
            # result could raise UnicodeDecodeError in the original.
            if msg == rmsg:
                print("True")
            else:
                print("False")
# Demo: encode a 243-symbol message over GF(2^12), zero out six codeword
# positions to simulate corruption, then recover the message.
prim = rs.find_prime_polys(c_exp=12, fast_primes=True, single=True)
rs.init_tables(c_exp=12, prim=prim)

n = 255
nsym = 12
mes = "a" * (n - nsym)

gen = rs.rs_generator_poly_all(n)
print(len(gen))

enc = rs.rs_encode_msg(mes, nsym, gen=gen[nsym])
# Corrupt six symbols (within the nsym/2 = 6 error-correction capacity).
for corrupt_pos in (1, 2, 4, 8, 32, 33):
    enc[corrupt_pos] = 0

rmes, recc = rs.rs_correct_msg(enc, nsym, erase_pos=None)
mesbytarray = "".join(chr(i) for i in rmes)
reedsolo.init_tables(0x11d) # Probability of errors #pe = [0.1, 0.25, 0.5, 0.75, 1] pe = [0.001, 0.01, 0.03, 0.05, 0.1, 0.15, 0.25, 0.40, 0.5] petrace = pe + [1] # Number of packets to test for each probability n = 10000 #nkarr= [2] nkarr = [2, 8, 18] for nfig in range(len(nkarr)): # Packet to be tested nk = nkarr[nfig] packet = reedsolo.rs_encode_msg('123456789', nk) #packet=reedsolo.rs_encode_msg('1', nk) l = len(packet) dec = [0] * (len(pe) + 1) ndec = [0] * (len(pe) + 1) idec = [0] * (len(pe) + 1) # Simulation loop for i, inum in enumerate(pe): print('Progress: %d/%d' % (i + 1, len(pe))) for j in range(n): en = packet[:] # Channel simulation for x, y in enumerate(en): # Add errors based on probability to be tested
#pe = [0.1, 0.25, 0.5, 0.75, 1] pe = [0.001, 0.01, 0.03, 0.05, 0.1, 0.15, 0.25, 0.40, 0.5] lpe = len(pe) + 1 # Number of packets to test for each probability n = 10000 dec = [0] * lpe idec = [0] * lpe terr = [0] * lpe naks = [0] * lpe narq = [0] * lpe ndec = [0] * lpe nsym = [0] * lpe veff = [0] * lpe p9 = reedsolo.rs_encode_msg('1234' * 2 + '1', 18) p65 = reedsolo.rs_encode_msg('1234' * 16 + '1', 18) p237 = reedsolo.rs_encode_msg('1234' * 59 + '1', 18) p = [] # Factorial of a number def factorial(n): return reduce(lambda x, y: x * y, [1] + range(1, n + 1)) # Calculation of packet decoding error probability (binomial form) def PDEP(p, N, nk): min = (nk / 2) + 1 pdep = 0 for k in range(min, N + 1):
def rs_check(data):
    """Return True iff the trailing ``nsym`` bytes of *data* match the parity
    that rs_encode_msg produces for the leading payload.

    Uses module-level ``nsym``, ``elen`` and ``fcr``.
    """
    payload_len = len(data) - nsym
    reference = reedsolo.rs_encode_msg(data[:payload_len], nsym + elen, fcr=fcr)
    received_parity = bytearray(data[payload_len:])
    # Only compare the first nsym parity bytes of the reference codeword.
    return received_parity == reference[payload_len:len(data)]
print("size of exp table ", asizeof.asizeof(rs.gf_exp)) print("size of log table is ", asizeof.asizeof(rs.gf_log)) msg_len_array = [] nsym_len_array = [] enc_time_array = [] dec_time_array = [] for msg_len in range(10, max_length / 3, 100): msg = randomString(msg_len) nsym = msg_len * 2 #consider corruption of all bits if nsym + msg_len > max_length: nsym = max_length - msg_len #encode the message gen_one = rs.rs_generator_poly(nsym=nsym) start_time4 = time.time() enc = rs.rs_encode_msg(msg, nsym, gen=gen_one) end_time4 = time.time() encoding_time = end_time4 - start_time4 poscorrup = random.sample(range(0, msg_len + nsym - 2), nsym / 2) for x in poscorrup: enc[x] = random.randint(0, 100) start_time5 = time.time() rmes, recc = rs.rs_correct_msg(enc, nsym, erase_pos=None) end_time5 = time.time() decoding_time = end_time5 - start_time5 mesbytarray = "".join(chr(i) for i in rmes) if msg == mesbytarray: print(" msg len= %d errors=%d enc_timr= %f dec_time= %f" %
def send(stream, length, type=0):
    """Transmit *length* bytes from *stream* as Reed-Solomon protected packets.

    Splits the payload into 236/64/8-byte packets based on the current FEC
    rate, encodes each with 2..18 FEC bytes, and on NAK retransmits two extra
    parity bytes at a time (Hybrid ARQ with incremental redundancy).

    :param stream: raw data; wrapped in a cStringIO buffer unless *type* is truthy
                   (in which case it is assumed to already be file-like).
    :param length: number of payload bytes to send.
    :param type: 0 (default) means *stream* is a raw string.
    :return: 1 when all data has been sent; None on max timeouts or when the
             FEC byte budget (18) is exhausted.

    NOTE(review): relies on module globals lim236, lim64, gen, updater,
    updater_count, NK, send_loop_tout, ACK, NAK — not visible here, confirm
    their contracts against the rest of the module.  Parameter ``type``
    shadows the builtin.
    """
    # Initializing the stream
    if not type:
        stream = cStringIO.StringIO(stream)
    # Sequence init
    seq = 20
    # FEC rate calculation and packet distribution from error map
    fec_rate = get_fec()
    updater(-1, fec_rate)
    # Divide the data into packets.
    # As the maximum FEC bytes allowed in a packet is 18, the maximum fec bytes to add to a 8+1
    # message is 18 (corrects 9 errors, fec rate becomes 1).
    # With the same reasoning, on a 64+1 packet, the maximum code rate is 0.13, and for
    # a 236+1 packet it becomes 0.03. However, limitations should be done in order to prevent
    # errors with the ARQ system, so the code rate should be lower than the actual maximum to allow
    # NAKs to be used.
    data236 = 0.0
    data64 = 0.0
    data8 = 0.0
    if fec_rate < lim236:
        data236 = math.floor(length / 236)
        data64 = math.floor((length % 236) / 64.0)
        data8 = math.ceil(((length % 236) % 64) / 8.0)
    elif fec_rate < lim64:
        data64 = math.floor(length / 64.0)
        data8 = math.ceil((length % 64) / 8.0)
    else:
        data8 = math.ceil(length / 8.0)
    # Send packets loop
    while True:
        # Declare data packet sizes
        if data236 > 0:
            data = stream.read(236)
            data236 -= 1
        elif data64 > 0:
            data = stream.read(64)
            data64 -= 1
        elif data8 > 0:
            data = stream.read(8)
            data8 -= 1
            # Last 8 bytes
            if (data236 + data64 + data8) == 0:
                # For last packet, sequence number = 8 - len(data)
                seq = 8 - len(data)
                data = data + ((8 - len(data)) * '\x00')
        else:
            # No packets left to send: all data acknowledged, we are done.
            return 1
        # From this block get: data / len(data)
        # Alternate sequence number if not last packet
        # NOTE(review): a last packet padded from an empty read gets seq == 8,
        # which this alternation flips to 9 — confirm the receiver handles it.
        if seq >= 9:
            seq = 8
        elif seq == 8:
            seq = 9
        # From this block get: seq (1 byte)
        # How many FEC bytes to add to the packet? FEC must be multiples of 2
        # for error correction
        fec_bytes = NK(fec_rate, (len(data) + 1)) + updater_count
        if fec_bytes > 18:
            fec_bytes = 18
        elif fec_bytes < 2:
            fec_bytes = 2
        # Init vars
        nak_count = 0
        first_packet = []
        # NAK loop
        while True:
            # Send two bytes of FEC for every count of NAK
            if nak_count > 0:
                fec_bytes += 2
            # Detect maximum and minimum of fec bytes
            if fec_bytes == 0:
                fec_bytes = 2
            elif fec_bytes > 18:
                logging.critical(
                    'In function send(): FEC bytes reached maximum')
                print("In function send(): FEC bytes already maximum")
                return None
            # From this block get: fec_bytes (number of fec_bytes to add)
            # Encode packet with fec_bytes bytes
            # Pre-process Reed-Solomon packet with full 18 bytes
            if nak_count > 0:
                # Send only the two bytes needed (Hybrid ARQ IR)
                fec_packet = first_packet[-(18 - fec_bytes + 2):-(
                    18 - fec_bytes)] if fec_bytes < 18 else first_packet[-2:]
            else:
                # Encode once with the full 18 parity bytes; shorter FEC
                # configurations are sliced from this codeword.
                first_packet = reedsolo.rs_encode_msg(
                    chr(seq) + data, 18, 0, 2, gen[18])
                if fec_bytes == 18:
                    fec_packet = first_packet
                else:
                    fec_packet = first_packet[0:-(18 - fec_bytes)]
            # From this block get: packet (final packet containing seq + msg + cobs + fec
            # Add header according to packet (alternate as the seq number for retransmissions)
            packet = reedsolo.rs_encode_msg(
                chr(len(fec_packet) + (nak_count % 2)), 2, 0, 2, gen[2])
            packet.extend(fec_packet)
            # Sending of packet and timeout checking
            answer = send_loop_tout(packet)
            if answer == None:
                # Header returned None
                logging.critical(
                    "In function send(): Max timeouts while getting response.")
                print("Timeout while sending data")
                return None
            elif answer == 'Error':
                # Header undecodable
                nak_count += 1
                if nak_count > 1:
                    updater(0)
                continue
            # Parse and decode answer - if undecodable resend packet
            if answer == ord(ACK):
                if nak_count == 0:
                    # Update only if it's within the various packets' range
                    if (fec_rate < lim236 and data236 > 0) or (
                            lim236 <= fec_rate < lim64 and data64 > 0) or (fec_rate >= lim64):
                        updater(1)
                break
            elif answer == ord(NAK):
                nak_count += 1
                if nak_count == 1:
                    # Update only if it's within the various packets' range
                    if (fec_rate < lim236 and data236 > 0) or (
                            lim236 <= fec_rate < lim64 and data64 > 0) or (fec_rate >= lim64):
                        updater(0)
bytesize=serial.EIGHTBITS, #inter_byte_timeout=0.1, timeout=0) # Init precomputed tables for Reed Solomon reedsolo.init_tables(0x11d) # Pre-generation of polynomials for faster encoding gen = reedsolo.rs_generator_poly_all(19) ################ DEFINES # Protocol bytes ACK = b'\x07' NAK = b'\x08' ack_pack = reedsolo.rs_encode_msg(ACK, 2, 0, 2, gen[2]) nak_pack = reedsolo.rs_encode_msg(NAK, 2, 0, 2, gen[2]) #For send mode a cancel with packet length must be issued to be recognized by the receiver # Relative location of the AUV. # For simulation purposes, randomize the location loc = [random.randint(10, 179), random.randint(0, 30)] # Error map file (CSV) mapFile = 'error_map_slave' # Timout management TOUT_recv = 0.7 # Timeout seconds for ARQ system TOUT_send = 0.5 # Timeout seconds for send function # Limits of the packets (SER)
else: nkarr[i] += (float(K) / (nk + K)) * (1 - PDEP(p, (K + nk), nk)) nk = (nkarr.index(max(nkarr)) + 1) * 2 # Limit packets of 237 to 4 bytes to avoid singleton bound errors return nk + 2 if nk <= 2 and K >= 100 else nk # Init reedsolo.init_tables(0x11d) # Number of packets to test n = 2000 p = reedsolo.rs_encode_msg('1234' * 2 + '1', 18) # Simulation loop dectry = [0] * n idec = [0] * n naks = [0] * (n / 10) nsym = [0] * n terr = [0] * n nfec = [0] * n for j in range(n): nki = NK(notpe, 9) + updater_count nak_count = 0 nfec[j] = nki en = []
def place_data(base: QRMatrix, raw_data_code: List, rs_block_info: List,
               error_code_word_count: int, mask_id: int) -> QRMatrix:
    """
    Places data on QRMatrix, and RETURNS DATA PART ONLY.
    :param mask_id: mask pattern id passed to MaskPattern.calculate for flipping.
    :param error_code_word_count: total ECC word count, divided evenly across RS blocks.
    :param base: Base QRMatrix, THIS HAS TO HAVE ALL THE NECESSARY MODULES READY!!
    :param raw_data_code: RAW DATA, DO NOT INCLUDE ERROR CORRECTING CODES!
    :param rs_block_info: List of Tuple of (RS block data code count, number of that RS blocks.)
    :return: a new QRMatrix holding only the (masked) data/ECC modules.
    """
    data_code = copy.deepcopy(raw_data_code)
    rs_blocks = []
    # split the data code according to the RS block information.
    current_index = 0
    for info in rs_block_info:
        word_count = info[0]
        repeat_count = info[1]
        for _ in range(repeat_count):
            rs_blocks.append(data_code[current_index:current_index + word_count])
            current_index = current_index + word_count
    # For each data code, calculate the error codes.
    ecc_word_count = int(error_code_word_count / len(rs_blocks))
    rs_block_error_codes = []
    rs.init_tables(0x11d)
    for rs_block in rs_blocks:
        # NOTE(review): the generator polynomials are recomputed for every
        # block; could be hoisted when all blocks share a length — confirm.
        total_length = len(rs_block) + ecc_word_count
        gen = rs.rs_generator_poly_all(total_length)
        # mesecc is the full codeword: data words followed by ECC words.
        mesecc = rs.rs_encode_msg(rs_block, ecc_word_count, gen=gen[ecc_word_count])
        print([x for x in gen[ecc_word_count]])
        rs_block_error_codes.append(mesecc)
        # i_fx = copy.deepcopy(rs_block)
        # i_fx = i_pad_codes(i_fx, gx_word_count)
        # rs_block_error_codes.append(i_galois_division(i_fx, GaloisDividerDictionary.get_divider_for(gx_word_count)))
    binary_code = []
    # First, interleave all the rs_blocks.
    code_index = 0
    # earlier RS blocks may run out of data code at the end. in such case, carry on.
    while True:
        continue_count = 0
        for block_id in range(len(rs_blocks)):
            if code_index >= len(rs_block_error_codes[block_id]):
                continue_count += 1
                continue
            # Each codeword byte becomes 8 booleans appended to binary_code.
            binary = convert_int_to_bool_array(
                rs_block_error_codes[block_id][code_index], 8)
            binary_code += binary
        if continue_count == len(rs_blocks):
            break
        code_index += 1
    i_rs_block_error_codes = [x for x in rs_block_error_codes[0]]
    print(i_rs_block_error_codes)
    # +) 25*25 All matrix
    # -) 8*8*3 Placement Pattern
    # -) 5*5 Mini Placement Pattern
    # -) 9*2 Timing pattern
    # -) 15*2 Metadata
    # Bit offset at which ECC (as opposed to data) modules begin.
    error_area_start_index = len(data_code) * 8
    # Then, we interleave all the rs_block_error_codes.
    """
    code_index = 0
    while True:
        continue_count = 0
        for block_id in range(len(rs_block_error_codes)):
            if code_index >= len(rs_block_error_codes[block_id]):
                continue_count += 1
                continue
            binary = convert_int_to_bool_array(rs_block_error_codes[block_id][code_index], 8)
            binary_code += binary
        if continue_count == len(rs_block_error_codes):
            break
        code_index += 1
    """
    data_buffer = QRMatrix(version=base.version)
    # How we place the data:
    # we start from bottom right. Then we decide which vertical direction to 'go when the situation requires first.'
    # (you can pick from up and down. We'll go UP this time)
    # After placing a bit in bottom right corner, we look at the two columns
    # (the one that you put the data + the one to the left.)
    # If you are currently on the right:
    #   Try going LEFT. BUT if the fixed pattern is already sitting there,
    #   You need to go whichever vertical direction you're going.
    #   If this is the first one it's probably UP in this case
    # If you are currently on the left:
    #   Look at the block to the current vertical direction.
    #   If the block is vacant...
    #     Go in that direction. If the right neighbour is vacant, place there. if not, place it here.
    #   If the block is taken...
    #     Try looking at the next neighbour in the current vertical direction. Do the same.
    #   If you ran out of the space...
    #     Give up going in the direction, go LEFT and place the bit there.
    #     This ALSO causes the VERTICAL DIRECTION to FLIP!
    # we have created `buffer` all the way up.
    # FIRST INDEX IS DOWN, SECOND INDEX IS RIGHT.
    r = base.length - 1
    c = base.length - 1
    code_index = 0
    is_going_up = True
    is_on_right = True
    while True:
        if code_index == error_area_start_index:
            print(
                "Error area is starting from row-wise {}, column-wise {}. We're going {} at this point"
                .format(r, c, "up" if is_going_up else "down"))
        # Place the data here
        assert base.value[r, c].is_null()
        assert data_buffer.value[r, c].is_null()
        # For masking reason, data will be saved in different place
        data_buffer.value[r, c] = QRModule.from_condition(binary_code[code_index])
        # increment the index
        code_index = code_index + 1
        if code_index == len(binary_code):
            break
        # Are you on the right?
        if is_on_right:
            # Try going to left... is it vacant?
            if base.value[r, c - 1].is_null():
                c = c - 1  # then it's safe to put there
                is_on_right = False
                continue
            else:
                # Then try going into current vertical directions until you hit vacancy.
                while True:
                    r = r + (-1 if is_going_up else 1)
                    if base.value[r, c].is_null():
                        break
                    assert 0 <= r < base.length, "You're not supposed to go out of the buffer like this."
                continue
        else:
            # You're on the left
            # Check the neighbouring block in current direction.
            checking_row = r
            while True:
                checking_row = checking_row + (-1 if is_going_up else 1)
                # if you end up running out of buffer during this process.
                # just go left and flip the direction. You're considered to be on right after this.
                if not (0 <= checking_row < base.length):
                    # But be careful not to step on other data.
                    checking_column = c - 1
                    checking_row = r
                    while True:
                        assert 0 <= checking_column < base.length, "You ran out of buffer in column direction"
                        # try to find vacancy in the next left column.
                        if base.value[checking_row, checking_column].is_null():
                            break
                        checking_row = checking_row - 1
                        # If for some reason there are none, we need to check next column.
                        # We preserve is_on_right at this point.
                        if not (0 <= checking_row < base.length):
                            checking_column = checking_column - 1
                            checking_row = r  # reset checking row to try again
                    r = checking_row
                    c = checking_column
                    is_going_up = not is_going_up
                    is_on_right = True
                    break
                # See the one to the right. is it vacant?
                if base.value[checking_row, c + 1].is_null():
                    # Right one is vacant!
                    r = checking_row
                    c = c + 1
                    is_on_right = True
                    break
                elif base.value[checking_row, c].is_null():
                    # Left one is vacant!
                    r = checking_row
                    is_on_right = False
                    break
                # Else, we need to continue going up or down.
                continue
    # return data_buffer
    for r in range(base.length):
        for c in range(base.length):
            # Some modules may not be used (e.g. Ver.5-Q, Binary mode.)
            # 'True vacancy,' which is occupied by neither data nor metadata,
            # seems to be treated as ON module - but we don't know for sure.
            if not base.value[r, c].is_null():
                # We do not flip base value stuff.
                continue
            if data_buffer.value[r, c].is_null():
                print("Null value detected at R: {0}, C: {1}, padded as True".
                      format(r, c))
                data_buffer.value[r, c] = QRModule.off()
            if MaskPattern.calculate(r, c, mask_id):
                if data_buffer.value[r, c].is_null():
                    print("Null value detected at R: {0}, C: {1}".format(r, c))
                data_buffer.value[r, c].flip()
    return data_buffer