def image_from_bits(self, bits, filename):
    # Convert the received payload to an image and save it.
    # No return value required.
    pixel_values = np.array([], dtype=np.uint8)
    # Retrieve the length of each row (first byte of the payload)
    row_length_bits = bits[0:8]
    row_length = np.packbits(row_length_bits)[0]
    # Decode the remaining bits, one byte per pixel
    pixel_idx = 8
    while pixel_idx < len(bits):
        pixel_bits = bits[pixel_idx:pixel_idx + 8]
        pixel_value = np.packbits(pixel_bits)[0]
        pixel_values = np.append(pixel_values, pixel_value)
        pixel_idx += 8
    # Integer division: Image.new expects integer dimensions
    img = Image.new('L', (len(pixel_values) // row_length, row_length))
    img.putdata(pixel_values)
    img.save(filename)
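# Usage sketch (not from the original source): a round-trip check of the
# payload layout assumed by image_from_bits above -- one byte of row length
# followed by one byte per pixel. The 2x2 "image" here is hypothetical.
import numpy as np

row_length = 2
pixels = np.array([0, 255, 128, 64], dtype=np.uint8)
bits = np.unpackbits(np.concatenate(([np.uint8(row_length)], pixels)))
assert np.packbits(bits[:8])[0] == row_length      # first byte: row length
assert (np.packbits(bits[8:]) == pixels).all()     # remaining bytes: pixels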
def write_data(dict_words):
    items = []
    with open(POSTS_FILE_PATH, 'r') as f:
        for post in ijson.items(f, 'item'):
            labels = [0] * len(CATEGORIES)
            was = False
            for hub in post['hubs']:
                for cur_label, label in enumerate(CATEGORIES):
                    if hub in label:
                        labels[cur_label] = 1
                        was = True
            if not was:
                continue
            words = [0] * BagOfWords.NUM_VOCABULARY_SIZE
            post_words = post['content'] + post['title']
            for word in post_words:
                if word in dict_words:
                    words[dict_words[word]] = 1
                else:
                    words[BagOfWords.NUM_VOCABULARY_SIZE - 1] = 1
            labels = np.packbits(labels).tolist()
            words = np.packbits(words).tolist()
            items.append((labels, words))
    shuffle(items)
    train_set_size = int(len(items) * BagOfWords.TRAIN_RATIO)
    _write_set(TF_ONE_SHOT_TRAIN_FILE_PATH, items[:train_set_size])
    _write_set(TF_ONE_SHOT_EVAL_FILE_PATH, items[train_set_size:])
    print('Set size : ', len(items))
    print('Train set size : ', train_set_size)
    print('Eval set size : ', len(items) - train_set_size)
def extractPayload(self):
    if self.payloadExists() is False:
        raise Exception("Error: carrier does not contain a payload")
    xml = ""
    shape = self.img.shape
    payload_bits = np.copy(self.img)
    payload_bits &= 1
    if len(shape) > 2:
        red = payload_bits[:, :, 0]
        green = payload_bits[:, :, 1]
        blue = payload_bits[:, :, 2]
        payload_buff = np.concatenate(
            (red.flatten('C'), green.flatten('C'), blue.flatten('C')), axis=0)
        payload = np.packbits(payload_buff)
        payload_list = [chr(item) for item in payload]
        for item in payload_list:
            xml += item
            if item == '>':
                termination = re.search(r"</payload>", xml)
                if termination:
                    final_payload = Payload(None, -1, xml)
                    return final_payload
    else:
        payload = np.packbits(payload_bits)
        payload_list = [chr(item) for item in payload]
        for item in payload_list:
            xml += item
            if item == '>':
                termination = re.search(r"</payload>", xml)
                if termination:
                    final_payload = Payload(None, -1, xml)
                    return final_payload
def test_name(self):
    for n in [16, 1, 5, 8, 16, 20, 20 * 8]:
        w = np.random.random(n)
        j = distpy.JaccardWeighted(w)
        j2 = pickle.loads(pickle.dumps(j, -1))
        for x in range(n):
            a = np.zeros(n, dtype=np.uint8)
            b = np.zeros(n, dtype=np.uint8)
            a[x] = 1
            b[x] = 1
            a = np.packbits(a)
            b = np.packbits(b)
            out0 = j.dist(a, b)
            out1 = jaccard_weighted_slow(a, b, w)
            out2 = j2.dist(a, b)
            self.assertEqual(out0, out1)
            self.assertEqual(out0, w[x])
            self.assertEqual(out0, out2)
            print((out0, out1, w[x]))
        for x in range(1000):
            # np.random.bytes needs an int; np.frombuffer replaces the
            # deprecated np.fromstring and avoids shadowing builtin `bytes`
            num_bytes = int(np.ceil(n / 8.0))
            a = np.frombuffer(np.random.bytes(num_bytes), dtype=np.uint8)
            b = np.frombuffer(np.random.bytes(num_bytes), dtype=np.uint8)
            out0 = j.dist(a, b)
            out1 = jaccard_weighted_slow(a, b, w)
            out2 = j2.dist(a, b)
            self.assertAlmostEqual(out0, out1)
            self.assertEqual(out0, out2)
            print((out0, out1))
def test_packbits_very_large():
    # test some with a larger arrays gh-8637
    # code is covered earlier but larger array makes crash on bug more likely
    for s in range(950, 1050):
        for dt in '?bBhHiIlLqQ':
            x = np.ones((200, s), dtype=bool)
            np.packbits(x, axis=1)
def calculate_node_hashes(children_a, children_b, taxon_order):
    n_taxa = len(taxon_order)
    children = set.union(children_a, children_b)
    parent_boolean = numpy.zeros(n_taxa, dtype=numpy.uint8)
    split_boolean = numpy.zeros(n_taxa, dtype=numpy.uint8)
    i = 0
    for j in range(n_taxa):
        t = taxon_order[j]
        if t in children:
            parent_boolean[j] = 1
            if i == 0:
                if t in children_a:
                    a_first = True
                else:
                    a_first = False
            if (t in children_b) ^ a_first:  # first child always "True"
                split_boolean[i] = 1
            i += 1
    parent_packed = numpy.packbits(parent_boolean)
    split_packed = numpy.packbits(split_boolean)
    parent_id = parent_packed.tobytes()  # tostring() is a deprecated alias
    split_id = split_packed.tobytes()
    return parent_id, split_id
def test_unpackbits_count():
    # test complete invertibility of packbits and unpackbits with count
    x = np.array([
        [1, 0, 1, 0, 0, 1, 0],
        [0, 1, 1, 1, 0, 0, 0],
        [0, 0, 1, 0, 0, 1, 1],
        [1, 1, 0, 0, 0, 1, 1],
        [1, 0, 1, 0, 1, 0, 1],
        [0, 0, 1, 1, 1, 0, 0],
        [0, 1, 0, 1, 0, 1, 0],
    ], dtype=np.uint8)
    padded1 = np.zeros(57, dtype=np.uint8)
    padded1[:49] = x.ravel()
    packed = np.packbits(x)
    for count in range(58):
        unpacked = np.unpackbits(packed, count=count)
        assert_equal(unpacked.dtype, np.uint8)
        assert_array_equal(unpacked, padded1[:count])
    for count in range(-1, -57, -1):
        unpacked = np.unpackbits(packed, count=count)
        assert_equal(unpacked.dtype, np.uint8)
        # count -1 because padded1 has 57 instead of 56 elements
        assert_array_equal(unpacked, padded1[:count - 1])
    for kwargs in [{}, {'count': None}]:
        unpacked = np.unpackbits(packed, **kwargs)
        assert_equal(unpacked.dtype, np.uint8)
        assert_array_equal(unpacked, padded1[:-1])
    assert_raises(ValueError, np.unpackbits, packed, count=-57)
    padded2 = np.zeros((9, 9), dtype=np.uint8)
    padded2[:7, :7] = x
    packed0 = np.packbits(x, axis=0)
    packed1 = np.packbits(x, axis=1)
    for count in range(10):
        unpacked0 = np.unpackbits(packed0, axis=0, count=count)
        assert_equal(unpacked0.dtype, np.uint8)
        assert_array_equal(unpacked0, padded2[:count, :x.shape[1]])
        unpacked1 = np.unpackbits(packed1, axis=1, count=count)
        assert_equal(unpacked1.dtype, np.uint8)
        assert_array_equal(unpacked1, padded2[:x.shape[1], :count])
    for count in range(-1, -9, -1):
        unpacked0 = np.unpackbits(packed0, axis=0, count=count)
        assert_equal(unpacked0.dtype, np.uint8)
        # count -1 because one extra zero of padding
        assert_array_equal(unpacked0, padded2[:count - 1, :x.shape[1]])
        unpacked1 = np.unpackbits(packed1, axis=1, count=count)
        assert_equal(unpacked1.dtype, np.uint8)
        assert_array_equal(unpacked1, padded2[:x.shape[0], :count - 1])
    for kwargs in [{}, {'count': None}]:
        unpacked0 = np.unpackbits(packed0, axis=0, **kwargs)
        assert_equal(unpacked0.dtype, np.uint8)
        assert_array_equal(unpacked0, padded2[:-1, :x.shape[1]])
        unpacked1 = np.unpackbits(packed1, axis=1, **kwargs)
        assert_equal(unpacked1.dtype, np.uint8)
        assert_array_equal(unpacked1, padded2[:x.shape[0], :-1])
    assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)
    assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)
def comprimir(arvore: HuffmanNode, bitGroupSize: int, compressedBitSize,
              codeTable: dict, dataBuffer):
    global paddingSize, nomeFicheiro
    try:
        outputHandler = open(nomeFicheiro.replace('.pbm', '.cpbm'), 'wb')
        # Encode the Huffman tree and write it to the file
        writeTreeToFile(outputHandler, compressedBitSize, paddingSize,
                        codeTable, bitGroupSize, arvore)
        print("Compressing the data with groups of:", bitGroupSize)
        reiniciarBufferDoFicheiro()
        outBuffer = list()
        get = codeTable.get
        bitpos = 0
        tamanhoFicheiroBits = tamanhoFicheiro * 8
        while bitpos < tamanhoFicheiroBits:
            # Read one group of bits
            bitGroup = read(dataBuffer, bitGroupSize)
            codigo = get(bitGroup, None)
            if codigo is not None:
                outBuffer.extend(codigo)
            bitpos += bitGroupSize
        print("Additional padding at the end:", paddingSize)
        np.packbits(outBuffer, -1).tofile(outputHandler)
        outputHandler.close()
    except Exception:
        print("Compression of the pbm file did not complete successfully")
def elucidate_cc_split(parent_id, split_id):
    # the ids are byte strings produced by packbits(...).tobytes()
    parent_id_bytes = numpy.frombuffer(parent_id, dtype=numpy.uint8)
    split_id_bytes = numpy.frombuffer(split_id, dtype=numpy.uint8)
    parent_id_bits = numpy.unpackbits(parent_id_bytes)
    split_id_bits = numpy.unpackbits(split_id_bytes)
    n_parent_bits = len(parent_id_bits)
    n_split_bits = len(split_id_bits)
    child1_bits = numpy.zeros(n_parent_bits, dtype=numpy.uint8)
    child2_bits = numpy.zeros(n_parent_bits, dtype=numpy.uint8)
    j = 0
    for i in range(n_parent_bits):
        if parent_id_bits[i] == 1:
            if j < n_split_bits:
                if split_id_bits[j] == 1:
                    child1_bits[i] = 1
                else:
                    child2_bits[i] = 1
            else:
                child2_bits[i] = 1
            j += 1
    child1_bytes = numpy.packbits(child1_bits)
    child2_bytes = numpy.packbits(child2_bits)
    # urgh, C (null terminated strings) vs Python (not null terminated) strings
    child1_id = child1_bytes.tobytes().rstrip(b"\x00")
    child2_id = child2_bytes.tobytes().rstrip(b"\x00")
    return child1_id, child2_id
def test_bad_count(self):
    packed0 = np.packbits(self.x, axis=0)
    assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)
    packed1 = np.packbits(self.x, axis=1)
    assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)
    packed = np.packbits(self.x)
    assert_raises(ValueError, np.unpackbits, packed, count=-57)
def payloadExists(self):
    buffer = []
    if self.color_flag:
        row = self.img[0]
        stop_flag = 0
        for col in row:
            if col[0] & numpy.uint8(1) == 1:
                buffer.append(1)
            else:
                buffer.append(0)
            if stop_flag == 7:
                print(buffer)
                valid = chr(numpy.packbits(buffer)[0])
                if valid == "<":
                    return True
                else:
                    return False
            stop_flag += 1
    else:
        i = 0
        row = self.img[0]
        while i < 8:
            if row[i] & numpy.uint8(1) == 1:
                buffer.append(1)
            else:
                buffer.append(0)
            i += 1
        valid = chr(numpy.packbits(buffer)[0])
        if valid == "<":
            return True
        else:
            return False
def write(self, filename):
    header_bytes = struct.pack(CHUNK_HEADER_FORMAT, self.data_size,
                               self.board_size, self.input_planes,
                               self.is_test)
    # tobytes() replaces the deprecated tostring() alias
    position_bytes = np.packbits(self.pos_features).tobytes()
    next_move_bytes = np.packbits(self.next_moves).tobytes()
    with gzip.open(filename, "wb", compresslevel=6) as f:
        f.write(header_bytes)
        f.write(position_bytes)
        f.write(next_move_bytes)
def convert(data, se):
    """Convert data according to the schema encoding"""
    dtype = data.dtype
    type = se.type
    converted_type = se.converted_type
    if dtype.name in typemap:
        if type in revmap:
            out = data.values.astype(revmap[type], copy=False)
        elif type == parquet_thrift.Type.BOOLEAN:
            padded = np.lib.pad(data.values, (0, 8 - (len(data) % 8)),
                                'constant', constant_values=(0, 0))
            out = np.packbits(padded.reshape(-1, 8)[:, ::-1].ravel())
        elif dtype.name in typemap:
            out = data.values
    elif "S" in str(dtype)[:2] or "U" in str(dtype)[:2]:
        out = data.values
    elif dtype == "O":
        try:
            if converted_type == parquet_thrift.ConvertedType.UTF8:
                out = array_encode_utf8(data)
            elif converted_type is None:
                if type in revmap:
                    out = data.values.astype(revmap[type], copy=False)
                elif type == parquet_thrift.Type.BOOLEAN:
                    padded = np.lib.pad(data.values, (0, 8 - (len(data) % 8)),
                                        'constant', constant_values=(0, 0))
                    out = np.packbits(padded.reshape(-1, 8)[:, ::-1].ravel())
                else:
                    out = data.values
            elif converted_type == parquet_thrift.ConvertedType.JSON:
                out = np.array([json.dumps(x).encode('utf8') for x in data],
                               dtype="O")
            elif converted_type == parquet_thrift.ConvertedType.BSON:
                out = data.map(tobson).values
            if type == parquet_thrift.Type.FIXED_LEN_BYTE_ARRAY:
                out = out.astype('S%i' % se.type_length)
        except Exception as e:
            ct = parquet_thrift.ConvertedType._VALUES_TO_NAMES[
                converted_type] if converted_type is not None else None
            raise ValueError('Error converting column "%s" to bytes using '
                             'encoding %s. Original error: '
                             '%s' % (data.name, ct, e))
    elif converted_type == parquet_thrift.ConvertedType.TIMESTAMP_MICROS:
        out = np.empty(len(data), 'int64')
        time_shift(data.values.view('int64'), out)
    elif converted_type == parquet_thrift.ConvertedType.TIME_MICROS:
        out = np.empty(len(data), 'int64')
        time_shift(data.values.view('int64'), out)
    elif type == parquet_thrift.Type.INT96 and dtype.kind == 'M':
        ns_per_day = (24 * 3600 * 1000000000)
        day = data.values.view('int64') // ns_per_day + 2440588
        ns = (data.values.view('int64') % ns_per_day)  # - ns_per_day // 2
        out = np.empty(len(data), dtype=[('ns', 'i8'), ('day', 'i4')])
        out['ns'] = ns
        out['day'] = day
    else:
        raise ValueError("Don't know how to convert data type: %s" % dtype)
    return out
def _generate_masks(self):
    """Creates left and right masks for all hash lengths."""
    tri_size = MAX_HASH_SIZE + 1
    # Called once on fitting, output is independent of hashes
    left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
    right_mask = left_mask[::-1, ::-1]
    self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
    self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
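# Illustration (assumed constants, not from the original module): with
# MAX_HASH_SIZE = 32 and HASH_DTYPE = '>u4', row k of the packed triangle
# keeps the k most significant bits of a 32-bit hash.
import numpy as np

MAX_HASH_SIZE = 32   # assumption for this sketch
HASH_DTYPE = '>u4'   # assumption: big-endian uint32 hashes
tri_size = MAX_HASH_SIZE + 1
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
left = np.packbits(left_mask).view(dtype=HASH_DTYPE)
assert left[0] == 0x00000000   # row 0 masks nothing
assert left[1] == 0x80000000   # row 1 keeps the top bit
assert left[2] == 0xC0000000   # row 2 keeps the top two bits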
def test_unpackbits_large():
    # test all possible numbers via comparison to already tested packbits
    d = np.arange(277, dtype=np.uint8)
    assert_array_equal(np.packbits(np.unpackbits(d)), d)
    assert_array_equal(np.packbits(np.unpackbits(d[::2])), d[::2])
    d = np.tile(d, (3, 1))
    assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d)
    d = d.T.copy()
    assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d)
def main(args):
    x1 = np.load(args.infile1)
    x2 = np.load(args.infile2)
    assert len(x1.shape) == 2, 'infile1 should be a 2d array!'
    assert len(x2.shape) == 2, 'infile2 should be a 2d array!'
    assert x1.shape[0] == x2.shape[0], 'the two infiles should have the same number of rows!'
    x1 = np.unpackbits(x1, axis=1)
    x2 = np.unpackbits(x2, axis=1)
    r1 = x1.shape[1] if args.row1 == 0 else args.row1
    r2 = x2.shape[1] if args.row2 == 0 else args.row2
    N = x1.shape[0]
    print(r1, r2, N)
    x1 = np.packbits(x1[:, :r1].T, axis=1)
    x2 = np.packbits(x2[:, :r2].T, axis=1)
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        x1 = cuda.to_gpu(x1)
        x2 = cuda.to_gpu(x2)
        xp = cupy
    else:
        xp = np
    # popcount LUT
    pc = xp.zeros(256, dtype=np.uint8)
    for i in range(256):
        pc[i] = (i & 1) + pc[i // 2]  # integer division; i / 2 is a float in Python 3
    hamm = xp.zeros((r1, r2), dtype=np.int32)
    for i in tqdm(range(r1)):
        x1i = xp.tile(x1[i], (r2, 1))
        if args.operation == 'xor':
            hamm[i] = xp.take(pc, xp.bitwise_xor(x1i, x2).astype(np.int32)).sum(axis=1)
        elif args.operation == 'nand':
            hamm[i] = xp.take(pc, xp.invert(xp.bitwise_and(x1i, x2)).astype(np.int32)).sum(axis=1)
    x1non0 = xp.tile((x1.sum(axis=1) > 0), (r2, 1)).T.astype(np.int32)
    x2non0 = xp.tile((x2.sum(axis=1) > 0), (r1, 1)).astype(np.int32)
    print(x1non0.shape, x2non0.shape)
    non0filter = x1non0 * x2non0
    print(non0filter.max(), non0filter.min())
    # mask out all-zero rows/columns by assigning the maximum distance
    hamm = non0filter * hamm + np.iinfo(np.int32).max * (1 - non0filter)
    if xp == cupy:
        hamm = hamm.get()
    np.save(args.out, hamm)
    if args.nearest > 0:
        hamm_s = np.sort(hamm.flatten())
        hamm_as = np.argsort(hamm.flatten())
        x, y = np.unravel_index(hamm_as[:args.nearest], hamm.shape)
        fname, ext = os.path.splitext(args.out)
        np.savetxt(fname + '_top{0}.tsv'.format(args.nearest),
                   np.concatenate((x[np.newaxis], y[np.newaxis],
                                   hamm_s[np.newaxis, :args.nearest]), axis=0).T,
                   fmt='%d', delimiter='\t')
def updateState(self):
    # TO DO: update the state variables reflected to vehicle data
    # i.e. to maintain continuity if vehicle data reread is requested
    # you need to map all the state control here
    setBigEndiNumberToNpArr(self.vehicle2_unpacked,
                            getArrayIdxFromStartBit(55), 1,
                            int(self.radar_poweron))
    self.vehicle2.data = np.packbits(self.vehicle2_unpacked).tolist()
    setBigEndiNumberToNpArr(self.vehicle2_unpacked,
                            getArrayIdxFromStartBit(22), 1,
                            int(self.clear_fault_on))
    self.vehicle2.data = np.packbits(self.vehicle2_unpacked).tolist()
    setBigEndiNumberToNpArr(self.vehicle2_unpacked,
                            getArrayIdxFromStartBit(56), 1,
                            int(self.rawdata_on))
    self.vehicle2.data = np.packbits(self.vehicle2_unpacked).tolist()
def parameter_from_population(individual):
    first_gene = individual[:8]
    second_gene = individual[8:16]
    third_gene = individual[16:]
    first_gene = np.packbits(first_gene, axis=-1)
    second_gene = np.packbits(second_gene, axis=-1)
    third_gene = np.packbits(third_gene, axis=-1)
    param_mutation = first_gene * coef_mut
    param_cross = second_gene * coef_cross
    param_population_size = int(third_gene * coef_population_size)
    return param_cross[0], param_mutation[0], param_population_size
def test_pack_unpack_order():
    a = np.array([[2], [7], [23]], dtype=np.uint8)
    b = np.unpackbits(a, axis=1)
    assert_equal(b.dtype, np.uint8)
    b_little = np.unpackbits(a, axis=1, bitorder='little')
    b_big = np.unpackbits(a, axis=1, bitorder='big')
    assert_array_equal(b, b_big)
    assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little'))
    assert_array_equal(b[:, ::-1], b_little)
    assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big'))
    assert_raises(ValueError, np.unpackbits, a, bitorder='r')
    assert_raises(TypeError, np.unpackbits, a, bitorder=10)
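# A minimal illustration of the bitorder round-trip exercised by the test
# above: 'little' simply reverses bit significance within each byte.
import numpy as np

big = np.unpackbits(np.array([7], dtype=np.uint8))  # 0 0 0 0 0 1 1 1
little = np.unpackbits(np.array([7], dtype=np.uint8), bitorder='little')
assert (little == big[::-1]).all()
assert np.packbits(little, bitorder='little')[0] == 7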
def gost_algo(message, key):
    """
    GOST 34.13-2015 MAC algorithm implementation
    message - binary message.
    key - 256-bit binary key
    """
    key_bytes = np.packbits(key).tobytes()
    msg_bytes = np.packbits(message).tobytes()
    gost_mac = MAC(key_bytes)
    digest_bytes = gost_mac(msg_bytes)
    return np.unpackbits(np.frombuffer(digest_bytes, dtype=np.uint8))
def gene_to_noise_params(individual, display=False):
    first_gene = individual[:gene_size]
    second_gene = individual[gene_size:gene_size * 2]
    third_gene = individual[gene_size * 2:]
    if display:
        print(first_gene, second_gene, third_gene)
    first_gene = np.packbits(first_gene, axis=-1)
    second_gene = np.packbits(second_gene, axis=-1)
    third_gene = np.packbits(third_gene, axis=-1)
    noise_amp = first_gene * coef_amp
    noise_freq_row = second_gene * coef_freq
    noise_freq_col = third_gene * coef_freq
    return noise_amp[0], noise_freq_row[0], noise_freq_col[0]
def write(self, bits):
    tmp = np.asarray(np.concatenate(
        (self.buf, np.fromiter(map(int, bits), dtype=np.ubyte))
    ), dtype=np.ubyte)
    # renamed from `bytes` to avoid shadowing the builtin
    chunks = np.array_split(tmp, range(8, len(tmp), 8))
    for b in chunks[:-1]:
        self.stream.write(np.packbits(b))
    if len(chunks[-1]) == 8:
        self.stream.write(np.packbits(chunks[-1]))
        self.buf = np.empty(0, np.ubyte)
    else:
        self.buf = chunks[-1]
    return len(tmp) // 8
def _to_hash(projected):
    if projected.shape[1] % 8 != 0:
        raise ValueError('Require reduced dimensionality to be a multiple '
                         'of 8 for hashing')
    # XXX: perhaps non-copying operation better
    out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
    return out.reshape(projected.shape[0], -1)
def packbits_axis(X, axis=-1):
    """Create a compact representation of rows of bits in numpy

    Parameters
    ----------
    X : array_like
        a d-dimensional array whose rows will be treated as a sequence of bits
    axis : integer
        the axis along which to pack the bits (default=-1)

    Returns
    -------
    x : array_like
        a (d - 1)-dimensional structured array containing sets of 8-bit
        integers which compactly represent the bits along the specified
        axis of X.
    """
    X = np.asarray(X, dtype=np.uint8)
    # roll specified axis to the back
    if axis not in (-1, X.ndim - 1):
        X = np.rollaxis(X, axis).transpose(list(range(1, X.ndim)) + [0])
    # make sure we have a C-ordered contiguous buffer
    X = np.asarray(X, order="C")
    bits = np.packbits(X, -1)
    return_shape = bits.shape[:-1]
    return_type = [("", "u1") for i in range(bits.shape[-1])]
    return np.ndarray(return_shape, dtype=return_type, buffer=bits)
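# Usage sketch for packbits_axis above (illustrative shapes only): packing
# 16 bits along the last axis of a (4, 16) array yields a length-4 structured
# array with two uint8 fields per element.
import numpy as np

X = np.zeros((4, 16), dtype=np.uint8)
X[0, 0] = 1                     # becomes the MSB of the first byte of row 0
packed = packbits_axis(X)
assert packed.shape == (4,)
assert tuple(packed[0]) == (128, 0)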
def getSignalNumber(barray_unpacked, barray, start_bit, signalsize,
                    isByteorderIntel, isValuetypeiSigned, factor, offset):
    # just guard against unhandled (yet) intel byte order
    # motorola only for now
    if 1 == isByteorderIntel:
        raise UserWarning
    start_bit_idx = getArrayIdxFromStartBit(start_bit)
    barray_msb = barray_unpacked[start_bit_idx]
    start_field_count = (start_bit + 1) % 8
    if start_field_count == 0:
        start_field_count = 8
    factor_number = int(factor) if float(factor).is_integer() else float(factor)
    offset_number = int(offset) if float(offset).is_integer() else float(offset)
    signal_number_bits = barray_unpacked[start_bit_idx:start_bit_idx + signalsize]
    # prepend zeros so the total bit count is a multiple of 8; the original
    # `signalsize % 8` only right-aligned the value when signalsize % 8 was 0 or 4
    no_of_padding = (8 - signalsize % 8) % 8
    signal_number_bits = np.concatenate(
        (np.array(no_of_padding * [0], dtype=np.uint8), signal_number_bits))
    signal_number = np.packbits(signal_number_bits)
    if len(signal_number) < 8:
        signal_number = np.concatenate(
            (signal_number,
             np.array((8 - len(signal_number)) * [0], dtype=np.uint8)))
    signal_number = signal_number.view(np.uint64).tolist()[0]
    if isValuetypeiSigned and barray_msb:
        signal_number = twosComplement(signal_number, signalsize)
    return signal_number * factor_number + offset_number
def dqt(ud16, lL0):
    """ decode quad tree integer to lon Lat

    Parameters
    ----------
    ud16 : nd.array (,N)
        quad-tree encoded integers
    lL0 : nd.array (,2)
        lower left corner of the 1 degree tile
    """
    N = len(ud16)
    # offset from the lower left corner
    uh8 = ud16 // 256  # integer division; `/` would give floats in Python 3
    ul8 = ud16 - uh8 * 256
    ud8 = (np.vstack((uh8, ul8)).T).astype('uint8')
    ud16 = np.unpackbits(ud8).reshape(N, 16)
    ndu8 = np.empty((2, N, 8)).astype('int')
    ndu8[0, :, :] = ud16[:, 1::2]
    ndu8[1, :, :] = ud16[:, 0::2]
    du8 = np.packbits(ndu8).reshape(2, N) / 256.
    lL = lL0[:, None] + du8
    return lL
def v2_apply_symmetry(self, symmetry, content):
    """
    Apply a random symmetry to a v2 record.
    """
    assert symmetry >= 0 and symmetry < 8
    # unpack the record.
    (ver, probs, planes, to_move, winner) = self.v2_struct.unpack(content)
    planes = np.unpackbits(np.frombuffer(planes, dtype=np.uint8))
    # We use the full length reflection tables to apply symmetry
    # to all 16 planes simultaneously
    planes = planes[self.full_reflection_table[symmetry]]
    assert len(planes) == 19 * 19 * 16
    planes = np.packbits(planes)
    planes = planes.tobytes()
    probs = np.frombuffer(probs, dtype=np.float32)
    # Apply symmetries to the probabilities.
    probs = probs[self.prob_reflection_table[symmetry]]
    assert len(probs) == 362
    probs = probs.tobytes()
    # repack record.
    return self.v2_struct.pack(ver, probs, planes, to_move, winner)
def _pandas_to_bucket(df, symbol, initial_image):
    rtn = {SYMBOL: symbol, VERSION: CHUNK_VERSION_NUMBER, COLUMNS: {},
           COUNT: len(df)}
    end = to_dt(df.index[-1].to_datetime())
    if initial_image:
        if 'index' in initial_image:
            start = min(to_dt(df.index[0].to_datetime()), initial_image['index'])
        else:
            start = to_dt(df.index[0].to_datetime())
        image_start = initial_image.get('index', start)
        image = {k: v for k, v in initial_image.items() if k != 'index'}
        rtn[IMAGE_DOC] = {IMAGE_TIME: image_start, IMAGE: initial_image}
        final_image = TickStore._pandas_compute_final_image(df, initial_image, end)
    else:
        start = to_dt(df.index[0].to_datetime())
        final_image = {}
    rtn[END] = end
    rtn[START] = start
    logger.warning("NB treating all values as 'exists' - no longer sparse")
    rowmask = Binary(lz4.compressHC(np.packbits(np.ones(len(df), dtype='uint8'))))
    recs = df.to_records(convert_datetime64=False)
    for col in df:
        array = TickStore._ensure_supported_dtypes(recs[col])
        col_data = {}
        col_data[DATA] = Binary(lz4.compressHC(array.tostring()))
        col_data[ROWMASK] = rowmask
        col_data[DTYPE] = TickStore._str_dtype(array.dtype)
        rtn[COLUMNS][col] = col_data
    rtn[INDEX] = Binary(
        lz4.compressHC(np.concatenate(
            ([recs['index'][0].astype('datetime64[ms]').view('uint64')],
             np.diff(recs['index'].astype('datetime64[ms]').view('uint64')))
        ).tostring()))
    return rtn, final_image
def parseSignal_new(barray, barray_unpacked, signal_names, signals):
    signal_list = {}
    for signal in signals:
        signal_number = 0
        start_bit = signal._startbit
        signalsize = signal._signalsize
        start_bit_idx = getArrayIdxFromStartBit(start_bit)
        barray_msb = barray_unpacked[start_bit_idx]
        start_field_count = (start_bit + 1) % 8
        if start_field_count == 0:
            start_field_count = 8
        signal_number_bits = barray_unpacked[start_bit_idx:start_bit_idx + signalsize]
        # prepend zeros so the packed length is a multiple of 8 (see getSignalNumber)
        no_of_padding = (8 - signalsize % 8) % 8
        signal_number_bits = np.concatenate(
            (np.array(no_of_padding * [0], dtype=np.uint8), signal_number_bits))
        signal_number = np.packbits(signal_number_bits)
        if len(signal_number) < 8:
            signal_number = np.concatenate(
                (signal_number,
                 np.array((8 - len(signal_number)) * [0], dtype=np.uint8)))
        signal_number = signal_number.view(np.uint64).tolist()[0]
        signal_list[signal._name] = signal_number
    return signal_list
def convert_v1_to_v2(self, text_item):
    """
    Convert v1 text format to v2 packed binary format

    Converts a set of 19 lines of text into a byte string
    [[plane_1],[plane_2],...],...
    [probabilities],...
    winner,...
    """
    # We start by building a list of 16 planes,
    # each being a 19*19 == 361 element array
    # of type np.uint8
    planes = []
    for plane in range(0, 16):
        # first 360 bits are 90 hex chars, encoded MSB
        hex_string = text_item[plane][0:90]
        array = np.unpackbits(np.frombuffer(
            bytearray.fromhex(hex_string), dtype=np.uint8))
        # Remaining bit that didn't fit. Encoded LSB so
        # it needs to be specially handled.
        last_digit = text_item[plane][90]
        if not (last_digit == "0" or last_digit == "1"):
            return False, None
        # Apply symmetry and append
        planes.append(array)
        planes.append(np.array([last_digit], dtype=np.uint8))
    # We flatten to a single array of len 16*19*19, type=np.uint8
    planes = np.concatenate(planes)
    # and then to a byte string
    planes = np.packbits(planes).tobytes()
    # Get the 'side to move'
    stm = text_item[16][0]
    if not (stm == "0" or stm == "1"):
        return False, None
    stm = int(stm)
    # Load the probabilities.
    probabilities = np.array(text_item[17].split()).astype(np.float32)
    if np.any(np.isnan(probabilities)):
        # Work around a bug in leela-zero v0.3, skipping any
        # positions that have a NaN in the probabilities list.
        return False, None
    if not (len(probabilities) == 362):
        return False, None
    probs = probabilities.tobytes()
    if not (len(probs) == 362 * 4):
        return False, None
    # Load the game winner color.
    winner = float(text_item[18])
    if not (winner == 1.0 or winner == -1.0):
        return False, None
    winner = int((winner + 1) / 2)
    version = struct.pack('i', 1)
    return True, self.v2_struct.pack(version, probs, planes, stm, winner)
def test_packbits_large():
    # test data large enough for 16 byte vectorization
    a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1,
                  1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0,
                  0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0,
                  0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0,
                  0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1,
                  1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1,
                  0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0,
                  1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0,
                  1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0,
                  0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1,
                  1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0,
                  0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1,
                  0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1,
                  1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0,
                  1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1,
                  1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0,
                  0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0,
                  1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0,
                  1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0])
    a = a.repeat(3)
    for dtype in '?bBhHiIlLqQ':
        arr = np.array(a, dtype=dtype)
        b = np.packbits(arr, axis=None)
        assert_equal(b.dtype, np.uint8)
        r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255,
             252, 113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127,
             255, 227, 142, 7, 31, 142, 63, 28, 126, 56, 227, 240, 0, 227,
             128, 63, 224, 14, 56, 252, 112, 56, 255, 241, 248, 3, 240, 56,
             224, 112, 63, 255, 255, 199, 224, 14, 0, 31, 143, 192, 3, 255,
             199, 0, 1, 255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14,
             63, 0, 15, 199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113,
             255, 255, 142, 56, 227, 129, 248, 227, 129, 199, 31, 128]
        assert_array_equal(b, r)
        # equal for size being multiple of 8
        assert_array_equal(np.unpackbits(b)[:-4], a)
        # check last byte of different remainders (16 byte vectorization)
        b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)]
        assert_array_equal(b, [128, 128, 128, 31, 30, 28, 24, 16, 0, 0, 0,
                               199, 198, 196, 192])

    arr = arr.reshape(36, 25)
    b = np.packbits(arr, axis=0)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, [[190, 186, 178, 178, 150, 215, 87, 83, 83, 195,
                            199, 206, 204, 204, 140, 140, 136, 136, 8, 40,
                            105, 107, 75, 74, 88],
                           [72, 216, 248, 241, 227, 195, 202, 90, 90, 83,
                            83, 119, 127, 109, 73, 64, 208, 244, 189, 45,
                            41, 104, 122, 90, 18],
                           [113, 120, 248, 216, 152, 24, 60, 52, 182, 150,
                            150, 150, 146, 210, 210, 246, 255, 255, 223,
                            151, 21, 17, 17, 131, 163],
                           [214, 210, 210, 64, 68, 5, 5, 1, 72, 88, 92, 92,
                            78, 110, 39, 181, 149, 220, 222, 218, 218, 202,
                            234, 170, 168],
                           [0, 128, 128, 192, 80, 112, 48, 160, 160, 224,
                            240, 208, 144, 128, 160, 224, 240, 208, 144,
                            144, 176, 240, 224, 192, 128]])

    b = np.packbits(arr, axis=1)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, [[252, 127, 192, 0], [7, 252, 15, 128],
                           [240, 0, 28, 0], [255, 128, 0, 128],
                           [192, 31, 255, 128], [142, 63, 0, 0],
                           [255, 240, 7, 0], [7, 224, 14, 0],
                           [126, 0, 224, 0], [255, 255, 199, 0],
                           [56, 28, 126, 0], [113, 248, 227, 128],
                           [227, 142, 63, 0], [0, 28, 112, 0],
                           [15, 248, 3, 128], [28, 126, 56, 0],
                           [56, 255, 241, 128], [240, 7, 224, 0],
                           [227, 129, 192, 128], [255, 255, 254, 0],
                           [126, 0, 224, 0], [3, 241, 248, 0],
                           [0, 255, 241, 128], [128, 0, 255, 128],
                           [224, 1, 255, 128], [248, 252, 126, 0],
                           [0, 7, 3, 128], [224, 113, 248, 0],
                           [0, 252, 127, 128], [142, 63, 224, 0],
                           [224, 14, 63, 0], [7, 3, 128, 0],
                           [113, 255, 255, 128], [28, 113, 199, 0],
                           [7, 227, 142, 0], [14, 56, 252, 0]])

    arr = arr.T.copy()
    b = np.packbits(arr, axis=0)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, [[252, 7, 240, 255, 192, 142, 255, 7, 126, 255,
                            56, 113, 227, 0, 15, 28, 56, 240, 227, 255,
                            126, 3, 0, 128, 224, 248, 0, 224, 0, 142, 224,
                            7, 113, 28, 7, 14],
                           [127, 252, 0, 128, 31, 63, 240, 224, 0, 255,
                            28, 248, 142, 28, 248, 126, 255, 7, 129, 255,
                            0, 241, 255, 0, 1, 252, 7, 113, 252, 63, 14,
                            3, 255, 113, 227, 56],
                           [192, 15, 28, 0, 255, 0, 7, 14, 224, 199, 126,
                            227, 63, 112, 3, 56, 241, 224, 192, 254, 224,
                            248, 241, 255, 255, 126, 3, 248, 127, 224, 63,
                            128, 255, 199, 142, 252],
                           [0, 128, 0, 128, 128, 0, 0, 0, 0, 0, 0, 128, 0,
                            0, 128, 0, 128, 0, 128, 0, 0, 0, 128, 128, 128,
                            0, 128, 0, 128, 0, 0, 0, 128, 0, 0, 0]])

    b = np.packbits(arr, axis=1)
    assert_equal(b.dtype, np.uint8)
    assert_array_equal(b, [[190, 72, 113, 214, 0], [186, 216, 120, 210, 128],
                           [178, 248, 248, 210, 128], [178, 241, 216, 64, 192],
                           [150, 227, 152, 68, 80], [215, 195, 24, 5, 112],
                           [87, 202, 60, 5, 48], [83, 90, 52, 1, 160],
                           [83, 90, 182, 72, 160], [195, 83, 150, 88, 224],
                           [199, 83, 150, 92, 240], [206, 119, 150, 92, 208],
                           [204, 127, 146, 78, 144], [204, 109, 210, 110, 128],
                           [140, 73, 210, 39, 160], [140, 64, 246, 181, 224],
                           [136, 208, 255, 149, 240], [136, 244, 255, 220, 208],
                           [8, 189, 223, 222, 144], [40, 45, 151, 218, 144],
                           [105, 41, 21, 218, 176], [107, 104, 17, 202, 240],
                           [75, 122, 17, 234, 224], [74, 90, 131, 170, 192],
                           [88, 18, 163, 168, 128]])

    # result is the same if input is multiplied with a nonzero value
    for dtype in 'bBhHiIlLqQ':
        arr = np.array(a, dtype=dtype)
        rnd = np.random.randint(low=np.iinfo(dtype).min,
                                high=np.iinfo(dtype).max, size=arr.size,
                                dtype=dtype)
        rnd[rnd == 0] = 1
        arr *= rnd.astype(dtype)
        b = np.packbits(arr, axis=-1)
        assert_array_equal(np.unpackbits(b)[:-4], a)

    assert_raises(TypeError, np.packbits, np.array(a, dtype=float))
def handle_msg(self, msg_pmt):
    msg = pmt.cdr(msg_pmt)
    if not pmt.is_u8vector(msg):
        print('[ERROR] Received invalid message type. Expected u8vector')
        return
    bits = np.array(pmt.u8vector_elements(msg))
    ltu = bits[:210].reshape((15, 14)).transpose()
    # Decode BCH(15,5,7)
    if not all((decode_bch15(ltu[j, :]) for j in range(14))):
        # Decode failure
        if self.verbose:
            print('BCH decode failure')
        return
    ltu = np.fliplr(ltu[:, -5:]).ravel()
    hdr = LTUFrameHeader.parse(np.packbits(ltu))
    ltu_crc = np.concatenate((ltu[:-5], np.array([1, 0, 1, 1, 0, 1, 1])))
    ltu_crc = ltu_crc.reshape((9, 8))
    if self.buggy_crc:
        # Reverse byte ordering for CRC5 calculation
        ltu_crc = np.flipud(ltu_crc)
    if self.buggy_crc:
        # Force CRC5 bugs
        ltu_crc[4, :] = ltu_crc[3, :]
    # CRC5 calculation
    crc = 0x1F
    for bit in ltu_crc.ravel():
        # Check most significant bit in the CRC buffer and save
        # in a variable.
        c = crc & 0x10
        # Shift variable to make the compare op. possible (see beneath).
        c >>= 4
        # Shift CRC to the left and write 0 into the least significant bit.
        crc <<= 1
        if c != bit:
            crc ^= 0x15  # CRC polynomial
        crc &= 0x1F
    if crc != hdr.CRC5:
        if self.verbose:
            print('CRC5 fail')
        return
    if self.verbose:
        print(hdr)
    if hdr.PduLength == 0:
        return
    codewords_per_block = 16
    uncoded = False
    if hdr.AiTypeSrc == 0:
        uncoded = True
    elif hdr.AiTypeSrc == 1:
        data_bits_per_codeword = 11  # BCH(15,11,3)
        bch_d = 3
    elif hdr.AiTypeSrc == 2:
        data_bits_per_codeword = 7  # BCH(15,7,5)
        bch_d = 5
    elif hdr.AiTypeSrc == 3:
        data_bits_per_codeword = 5  # BCH(15,5,7)
        bch_d = 7
    else:
        if self.verbose:
            print('Invalid AiTypeSrc')
        return
    if uncoded:
        pdu_bytes = bits[210:210 + hdr.PduLength * 8]
        pdu_bytes = pdu_bytes.reshape((hdr.PduLength, 8))
        pdu_bytes = np.fliplr(pdu_bytes)
    else:
        data_bytes_per_block = (codewords_per_block
                                * data_bits_per_codeword // 8)
        num_blocks = int(np.ceil(float(hdr.PduLength) / data_bytes_per_block))
        blocks = list()
        for k in range(num_blocks):
            block = bits[210 + k * 16 * 15:
                         210 + (k + 1) * 16 * 15].reshape((15, 16))
            if bch_d:
                block = block.transpose()
            if not bch_d:
                print(block)
            # Decode BCH
            if (bch_d and not all((decode_bch15(block[j, :], d=bch_d)
                                   for j in range(16)))):
                # Decode failure
                if self.verbose:
                    print('BCH decode failure')
                return
            if bch_d:
                blocks.append(block[:, -data_bits_per_codeword:].ravel())
            else:
                blocks.append(block.ravel())
        pdu_bytes = np.concatenate(blocks)
        pdu_bytes = pdu_bytes.reshape((data_bytes_per_block * num_blocks, 8))
        pdu_bytes = np.fliplr(pdu_bytes)
        # Drop 0xDB padding bytes at the end
        pdu_bytes = pdu_bytes[:hdr.PduLength]
        if not bch_d:
            print(pdu_bytes)
    # CRC13
    crc = 0x1FFF
    pdu_crc = np.flipud(pdu_bytes) if self.buggy_crc else pdu_bytes
    for bit in pdu_crc.ravel():
        # Check most significant bit in the CRC buffer and save it
        # in a variable.
        c = crc & 0x1000
        # Shift variable to make the compare op. possible (see beneath).
        c >>= 12
        # Shift CRC to the left and write 0 into the least significant bit.
        crc <<= 1
        if (c or bit) if self.buggy_crc else (c != bit):
            crc ^= 0x1CF5  # CRC polynomial
        crc &= 0x1FFF
    if crc != hdr.CRC13:
        if self.verbose:
            print('CRC13 fail')
        return
    pdu = np.packbits(pdu_bytes)
    pdu_tags = pmt.make_dict()
    pdu_tags = pmt.dict_add(pdu_tags, pmt.intern('SNET SrcId'),
                            pmt.from_long(hdr.SrcId))
    self.message_port_pub(
        pmt.intern('out'),
        pmt.cons(pdu_tags, pmt.init_u8vector(len(pdu), pdu)))
def decrypt_string(input_bits):
    to_decrypt = np.array(list(input_bits)).reshape(-1, 8).astype(np.uint8)
    decrypted = np.apply_along_axis(DES.apply, 1, to_decrypt, DES.key_test,
                                    False).astype(np.uint8)
    packed = np.packbits(decrypted)
    return "".join([chr(item) for item in packed])
def pack_shot_data(shot_data):
    return np.packbits(shot_data, axis=1)
def SimulateBER(snrArray, txBin, Npixels, modulatioInfo):
    nSNR = len(snrArray)
    rxDataArray = np.empty(len(txBin))
    BitErrorArray = np.empty(2)
    berArray = np.empty(0)
    mod = 0
    # Create Modulation Scheme Object
    if (modulatioInfo.get("mod") == "PSK"):
        mod = komm.PSKModulation(modulatioInfo.get("order"))
    if (modulatioInfo.get("mod") == 'QAM'):
        mod = komm.QAModulation(modulatioInfo.get("order"))
        # Normalize energy per symbol
        baseAmplitude = 1 / (np.sqrt(mod.energy_per_symbol))
        mod = komm.QAModulation(modulatioInfo.get("order"), baseAmplitude)
    print("Modulation to be used:")
    print(str(modulatioInfo.get("order")) + " " + str(modulatioInfo.get("mod")))
    print("Bits Per Symbol: " + str(mod.bits_per_symbol))
    print("Energy Per Symbol: " + str(mod.energy_per_symbol))
    print("\n")
    # Modulate Data
    txData = mod.modulate(txBin)
    # For each transmission
    for i in range(nSNR):
        # Calculate the noise level from the SNR in dB
        awgn = komm.AWGNChannel(snr=10**(snrArray[i] / 10.))
        # Simulate noise in channel
        rxData = awgn(txData)
        # Demodulate Data
        rxBin = mod.demodulate(rxData)
        # Append demodulated data as a new row
        rxDataArray = np.vstack([rxDataArray, rxBin])
    awgn = komm.AWGNChannel(snr=10**(snrArray[10] / 10.))
    rx_data = awgn(txData)
    rx_bin = mod.demodulate(rx_data)
    # Plot a few rx symbols
    plt.figure()
    plt.axes().set_aspect("equal")
    plt.scatter(rx_data[:10000].real, rx_data[:10000].imag, s=1, marker=".")
    plt.show()
    rx_im = np.packbits(rx_bin).reshape(tx_im.size[1], tx_im.size[0])
    plt.figure()
    plt.imshow(np.array(rx_im), cmap="gray", vmin=0, vmax=255)
    plt.show()
    # Measuring Bit Error Ratio
    # For each transmission
    for j in range(1, nSNR + 1):
        # Reset number of bit errors
        BitErrorCount = 0
        # Compute bit errors, i.e. for each bit
        for i in range(Npixels * 8):
            # If bit value does not match
            if (rxDataArray[j][i] != txBin[i]):
                # Increment error count
                BitErrorCount += 1
        # Calculate bit error rate for the transmission
        ber = BitErrorCount / (Npixels * 8)
        berArray = np.append(berArray, ber)
        # Append new row containing bit error count and bit error rate
        BitErrorArray = np.vstack([BitErrorArray, [BitErrorCount, ber]])
    print("Bit Error Array:")
    print(BitErrorArray)
    print("\n")
    plt.figure()
    plt.scatter(snrArray, berArray)  # plot points
    plt.plot(snrArray, berArray)  # plot lines
    plt.yscale("log")
    plt.ylabel('$BER$')
    plt.xlabel('$SNR$')
    plt.title(str(modulatioInfo.get("order")) + " " + str(modulatioInfo.get("mod")))
    plt.grid(True)
    # plt.show()
    # Calculate theoretical BER
    # k is the number of bits per symbol
    k = mod.bits_per_symbol
    errfcDataSet = np.empty(0)
    # For each SNR
    for i in range(nSNR):
        # Calculate theoretical BER
        errfc = 0.5 * scipy.special.erfc(
            math.sqrt((10**(snrArray[i] / 10)) / k))
        errfcDataSet = np.append(errfcDataSet, errfc)
    plt.plot(snrArray, errfcDataSet, color='r')
    plt.show()
    print("Errfc Data Set:")
    print(errfcDataSet)
    print("\n")
    return berArray, errfcDataSet
def findHeader(info, header):
    pos = np.argmax(np.correlate(np.unpackbits(info), np.unpackbits(header)))
    rcv_array = np.packbits(np.roll(np.unpackbits(info), -pos))
    return rcv_array
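# A synthetic check (illustrative bytes only) of the correlation trick in
# findHeader above: a bit-rolled stream realigns so the header leads again.
import numpy as np

header = np.array([0xAA, 0x0F], dtype=np.uint8)
payload = np.array([0xDE, 0xAD], dtype=np.uint8)
frame_bits = np.unpackbits(np.concatenate((header, payload)))
stream = np.packbits(np.roll(frame_bits, 5))   # misalign by 5 bits
aligned = findHeader(stream, header)
assert (aligned[:2] == header).all()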
def encode(source, message, bit_split, source_type="array", message_type="array"):
    """
    Encodes the message into the source.

    :param source: Source
    :type source: numpy.array
    :param message: Message
    :type message: numpy.array
    :param bit_split: Bit split
    :type bit_split: int
    :param source_type: Source type
    :type source_type: str
    :param message_type: Message type
    :type message_type: str
    :return: Encoded image
    :rtype: numpy.array
    """
    if bit_split > 8:
        raise Exception("Bit Split must be >= 1 and <= 8")
    if source_type == "array":
        source, source_original_shape = Source.from_array(source)
    elif source_type == "image":
        source, source_original_shape = Source.from_image(source)
    else:
        raise Exception("Source type not valid", source_type)
    if message_type == "array":
        (message, padding), message_extras = Message.from_array(message, bit_split)
    elif message_type == "image":
        (message, padding), message_extras = Message.from_image(message, bit_split)
    elif message_type == "text":
        (message, padding), message_extras = Message.from_text(message, bit_split)
    elif message_type == "text_file":
        (message, padding), message_extras = Message.from_text_file(message, bit_split)
    elif message_type == "text_stream":
        (message, padding), message_extras = Message.from_text_stream(message, bit_split)
    else:
        raise Exception("Message type not valid", message_type)
    bit_split_str = "{0:04b}".format(bit_split)
    padding = "{0:04b}".format(padding)
    message_length = "{0:032b}".format(message.shape[0])  # message height
    mt = Steganography.type_map[message_type]
    extras = []
    for extra in message_extras:
        e = "{0:016b}".format(extra)
        extras.append(e)
    num_extras = "{0:04b}".format(len(extras))
    header = [bit_split_str, padding, message_length, mt, num_extras]
    header.extend(extras)
    header = np.array(list("".join(header)))
    header = np.expand_dims(header, axis=1)
    if header.shape[0] + message.shape[0] > source.shape[0]:
        e = (
            "Message size too large!",
            str(header.shape[0] + message.shape[0]),
            ">",
            str(source.shape[0]),
        )
        raise Exception(e)
    # write header
    encoded = np.copy(source)
    encoded[:header.shape[0], -1:] = header
    # write message
    offset = header.shape[0]
    encoded[offset:offset + message.shape[0], -bit_split:] = message
    # converts back to regular numbers and reshapes to original size
    encoded = np.packbits(encoded)
    encoded = encoded.reshape(source_original_shape)
    return encoded
def seed_rl_preprocessing(observation):
    observation = np.expand_dims(observation, axis=0)
    data = np.packbits(observation, axis=-1)  # This packs to uint8
    if data.shape[-1] % 2 == 1:
        data = np.pad(data, [(0, 0)] * (data.ndim - 1) + [(0, 1)], 'constant')
    return data.view(np.uint16)
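# Shape sketch for seed_rl_preprocessing above (the 96-bit observation is an
# assumption): 96 booleans pack to 12 bytes, which view cleanly as 6 uint16s.
import numpy as np

obs = np.zeros(96, dtype=np.uint8)
out = seed_rl_preprocessing(obs)
assert out.shape == (1, 6) and out.dtype == np.uint16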
import numpy as np
import os

Ts = np.arange(1.0, 4.1, 0.1)
Tss = ['{:.1f}'.format(T) for T in Ts]
for T in Tss:
    cmd = ('~/Programming/ising/install/bin/ising -d 2 -L 20 -T {0} '
           '--nmeas 1000 --nmcs 20000000 --ieq 5000 --dyn 0 '
           '--print-state').format(T)
    outfname = '../data_paper/configurations_{}_glauber.npz'.format(T)
    print(cmd)
    os.system('rm estats/*')
    os.system(cmd)
    X = np.zeros((20000, 20, 20), dtype=np.uint8)
    for i in range(20000):
        snapshot = np.loadtxt('estats/estat{}.txt'.format(i + 1), dtype=int)
        X[i][snapshot > 0] = 1
    print(outfname, np.mean(X))
    np.savez_compressed(outfname, np.packbits(X, axis=-1))
    os.system('rm estats/*')
def pack(a):
    """Pack a boolean array *a* so that it takes 8x less space."""
    return np.packbits(a.view(np.uint8))
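# Round-trip sketch for pack above: unpackbits restores the original bits,
# padded out to a multiple of eight.
import numpy as np

mask = np.array([True, False, True, True, False])
restored = np.unpackbits(pack(mask))[:mask.size].astype(bool)
assert (restored == mask).all()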
def compress_indices(self, width=24):
    # Compression that takes less space for sparse profiles: instead of
    # saving each element of the array as 1 byte, we save the indices of all
    # the elements that are True.
    # Saving one index requires more than 16 bits (16 bits only allow for
    # 65536 values, and a transcriptome can be larger); however, using
    # 32 bits per index seems like a waste of space. Therefore a configurable
    # width (24 bits by default) is used per index.
    #
    # The format is:
    # first, 4 bytes keep the length of the profile in uint32 format
    # then, 4 bytes keep the total number of indices of True values (N)
    # then, one array of width * N bits keeps the index positions
    # then, the last 16 bytes keep the MD5 checksum
    true_indices = np.where(self.values)  # get indices
    true_indices = true_indices[0]  # np.where returns a tuple
    true_indices = true_indices.astype(np.uint32)  # convert to unsigned ints
    # split each uint32 value into 4 uint8 values so that we can turn them
    # into binary in a vectorized manner
    N_indices = true_indices.shape[0]
    binary_array = np.zeros((N_indices, 32), dtype=bool)  # np.bool is deprecated
    # iterate through all the indices and turn each into bin and then into
    # 4 uint8 values
    for i, value in enumerate(true_indices):
        bit_string = struct.pack('I', value)
        curr_uint8 = np.frombuffer(bit_string, dtype=np.uint8)
        binary_array[i, :] = np.unpackbits(curr_uint8)
    # remove extra unused bits: shorten each value from 32 bits to the
    # specified width (24 by default)
    total_count_per_byte = binary_array.sum(axis=0)
    assert (total_count_per_byte[width:] == 0).all(), \
        "some indices are larger than the chosen width!"
    shortened_binary_array = binary_array[:, 0:width]
    flattened_binary_array = shortened_binary_array.flatten()
    # pad the total array to a multiple of 8 so that we can pack it to bytes
    total_number_of_values = flattened_binary_array.shape[0]
    if total_number_of_values % 8 != 0:
        new_number_of_values = ((total_number_of_values // 8) + 1) * 8
        binary_bytes_array = np.zeros(new_number_of_values, dtype=bool)
        binary_bytes_array[0:total_number_of_values] = flattened_binary_array
    else:
        binary_bytes_array = flattened_binary_array
    length_uint32 = np.array([self.values.shape[0]], dtype=np.uint32)
    length_bitstring = length_uint32.tobytes()
    N_indices_uint32 = np.array([N_indices], dtype=np.uint32)
    N_indices_bitstring = N_indices_uint32.tobytes()
    width_uint32 = np.array([width], dtype=np.uint32)
    width_bitstring = width_uint32.tobytes()
    indices_packbits = np.packbits(binary_bytes_array)
    indices_bitstring = indices_packbits.tobytes()
    info_bitstring = (length_bitstring + N_indices_bitstring +
                      width_bitstring + indices_bitstring)
    md5 = hashlib.md5()
    md5.update(info_bitstring)
    md5_checksum = md5.digest()
    # an MD5 checksum is always 16 bytes long,
    # see https://en.wikipedia.org/wiki/MD5
    assert (md5.digest_size == 16)
    full_bytestring = info_bitstring + md5_checksum
    self.bytestring_indices = full_bytestring
    self.md5_indices = md5_checksum
def pack(array):
    if not isinstance(array, numpy.ndarray):
        array = numpy.array(array, dtype=oamap.generator.Masked.maskdtype)
    return numpy.packbits(array != oamap.generator.Masked.maskedvalue)
def __init__(self, resolution=(400, 300), colour='black', cs_pin=CS0_PIN,
             dc_pin=DC_PIN, reset_pin=RESET_PIN, busy_pin=BUSY_PIN,
             h_flip=False, v_flip=False):
    if resolution not in _RESOLUTION.keys():
        raise ValueError('Resolution {}x{} not supported!'.format(*resolution))
    self.resolution = resolution
    self.width, self.height = resolution
    self.cols, self.rows, self.rotation = _RESOLUTION[resolution]

    if colour not in ('red', 'black', 'yellow'):
        raise ValueError('Colour {} is not supported!'.format(colour))
    self.colour = colour

    self.buf = numpy.zeros((self.height, self.width), dtype=numpy.uint8)
    self.buf_zero = numpy.zeros((self.height, self.width), dtype=numpy.uint8)
    self.buf_black = numpy.packbits(numpy.where(self.buf_zero == WHITE, 0, 1)).tolist()
    self.buf_white = numpy.packbits(numpy.where(self.buf_zero == BLACK, 0, 1)).tolist()

    self.border_colour = 0

    self.dc_pin = dc_pin
    self.reset_pin = reset_pin
    self.busy_pin = busy_pin
    self.cs_pin = cs_pin
    self.h_flip = h_flip
    self.v_flip = v_flip

    self._gpio_setup = False

    self._luts = {
        'clear-black': [
            # Phase 0     Phase 1     Phase 2     Phase 3     Phase 4     Phase 5     Phase 6
            # A B C D     A B C D     A B C D     A B C D     A B C D     A B C D     A B C D
            0b00010000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT0 - Black
            0b00010000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT0 - Black
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # IGNORE
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT3 - Red
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT4 - VCOM
            # Duration      | Repeat
            # A   B   C   D |
            0, 100, 0, 0, 1,  # 2 bring in the black
            0, 0, 0, 0, 0,    # 0 Flash
            0, 0, 0, 0, 0,    # 1 clear
        ],
        'clear-white': [
            # Phase 0     Phase 1     Phase 2     Phase 3     Phase 4     Phase 5     Phase 6
            # A B C D     A B C D     A B C D     A B C D     A B C D     A B C D     A B C D
            0b10100000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT0 - Black
            0b10100000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT0 - Black
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # IGNORE
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT3 - Red
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT4 - VCOM
            # Duration      | Repeat
            # A   B   C   D |
            # 0, 100, 0, 0, 1,  # 2 bring in the black
            0, 66, 0, 0, 2,  # 2 bring in the black
            0, 0, 0, 0, 0,   # 1 clear
            0, 0, 0, 0, 0,   # 1 clear
        ],
        'draw-from-black': [
            # Phase 0     Phase 1     Phase 2     Phase 3     Phase 4     Phase 5     Phase 6
            # A B C D     A B C D     A B C D     A B C D     A B C D     A B C D     A B C D
            0b00010000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT0 - Black
            0b10000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUTT1 - White
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # IGNORE
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT3 - Red
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT4 - VCOM
            # Duration      | Repeat
            # A   B   C   D |
            50, 0, 0, 0, 2,  # 2 bring in the invert
            0, 0, 0, 0, 0,   # 0 clear
            0, 0, 0, 0, 0,   # 1 clear
        ],
        'draw-from-white': [
            # Phase 0     Phase 1     Phase 2     Phase 3     Phase 4     Phase 5     Phase 6
            # A B C D     A B C D     A B C D     A B C D     A B C D     A B C D     A B C D
            0b00010000, 0b00000000, 0b00000000, 0b00010000, 0b00010011, 0b00000000, 0b00000000,  # LUT0 - Black
            0b10000000, 0b00000000, 0b00000000, 0b00000000, 0b00000011, 0b00000000, 0b00000000,  # LUTT1 - White
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # IGNORE
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT3 - Red
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT4 - VCOM
            # Duration      | Repeat
            # A   B   C   D |
            20, 20, 0, 0, 4,  # 2 bring in the black
            0, 0, 0, 0, 0,    # 1 clear
            0, 0, 0, 0, 0,    # 1 clear
        ],
        'black': [
            # Phase 0     Phase 1     Phase 2     Phase 3     Phase 4     Phase 5     Phase 6
            # A B C D     A B C D     A B C D     A B C D     A B C D     A B C D     A B C D
            0b01001000, 0b10100000, 0b00010000, 0b00010000, 0b00010011, 0b00000000, 0b00000000,  # LUT0 - Black
            0b01001000, 0b10100000, 0b10000000, 0b00000000, 0b00000011, 0b00000000, 0b00000000,  # LUTT1 - White
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # IGNORE
            0b01001000, 0b10100101, 0b00000000, 0b10111011, 0b00000000, 0b00000000, 0b00000000,  # LUT3 - Red
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT4 - VCOM
            # Duration      | Repeat
            # A   B   C   D |
            16, 4, 4, 4, 4,    # 0 Flash
            16, 4, 4, 4, 4,    # 1 clear
            4, 8, 8, 16, 16,   # 2 bring in the black
            0, 0, 0, 0, 0,     # 3 time for red
            0, 0, 0, 0, 0,     # 4 final black sharpen phase
            0, 0, 0, 0, 0,     # 5
            0, 0, 0, 0, 0,     # 6
        ],
        'red': [
            # Phase 0     Phase 1     Phase 2     Phase 3     Phase 4     Phase 5     Phase 6
            # A B C D     A B C D     A B C D     A B C D     A B C D     A B C D     A B C D
            0b01001000, 0b10100000, 0b00010000, 0b00010000, 0b00010011, 0b00000000, 0b00000000,  # LUT0 - Black
            0b01001000, 0b10100000, 0b10000000, 0b00000000, 0b00000011, 0b00000000, 0b00000000,  # LUTT1 - White
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # IGNORE
            0b01001000, 0b10100101, 0b00000000, 0b10111011, 0b00000000, 0b00000000, 0b00000000,  # LUT3 - Red
            0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # LUT4 - VCOM
            # Duration      | Repeat
            # A   B   C   D |
            64, 12, 32, 12, 6,  # 0 Flash
            16, 8, 4, 4, 6,     # 1 clear
            4, 8, 8, 16, 16,    # 2 bring in the black
            2, 2, 2, 64, 32,    # 3 time for red
            2, 2, 2, 2, 2,      # 4 final black sharpen phase
            0, 0, 0, 0, 0,      # 5
            0, 0, 0, 0, 0       # 6
        ],
        'yellow': [
            # Phase 0     Phase 1     Phase 2     Phase 3     Phase 4     Phase 5     Phase 6
            # A B C D     A B C D     A B C D     A B C D     A B C D     A B C D     A B C D
            0b11111010, 0b10010100, 0b10001100, 0b11000000, 0b11010000, 0b00000000, 0b00000000,  # LUT0 - Black
            0b11111010, 0b10010100, 0b00101100, 0b10000000, 0b11100000, 0b00000000, 0b00000000,  # LUTT1 - White
            0b11111010, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000, 0b00000000,  # IGNORE
            0b11111010, 0b10010100, 0b11111000, 0b10000000, 0b01010000, 0b00000000, 0b11001100,  # LUT3 - Yellow (or Red)
            0b10111111, 0b01011000, 0b11111100, 0b10000000, 0b11010000, 0b00000000, 0b00010001,  # LUT4 - VCOM
            # Duration      | Repeat
            # A   B   C   D |
            64, 16, 64, 16, 8,
            8, 16, 4, 4, 16,
            8, 8, 3, 8, 32,
            8, 4, 0, 0, 16,
            16, 8, 8, 0, 32,
            0, 0, 0, 0, 0,
            0, 0, 0, 0, 0,
        ]
    }
def pack_state(self, state):
    black = np.packbits(state == Board.STONE_BLACK)
    white = np.packbits(state == Board.STONE_WHITE)
    empty = np.packbits(state == Board.STONE_EMPTY)
    image = np.concatenate((black, white, empty))
    return bytes(image)
def test_count(self, kwargs):
    packed = np.packbits(self.x)
    unpacked = np.unpackbits(packed, **kwargs)
    assert_equal(unpacked.dtype, np.uint8)
    assert_array_equal(unpacked, self.padded1[:-1])
def pixels_to_raster(pixels):
    packed = np.packbits(pixels)
    inverted = np.invert(packed)
    return inverted.tolist()
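# A quick check of pixels_to_raster above: packed bits are inverted, so the
# row 11110000 becomes the raster byte 0x0F.
import numpy as np

row = np.array([1, 1, 1, 1, 0, 0, 0, 0], dtype=np.uint8)
assert pixels_to_raster(row) == [0x0F]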
def decode(source, source_type="array"):
    """
    Decodes the source and returns the hidden message.

    :param source: The source
    :type source: numpy.array
    :param source_type: Source type
    :type source_type: str
    :return: Hidden message
    :rtype: numpy.array
    """
    if source_type == "array":
        source, source_original_shape = Source.from_array(source)
    elif source_type == "image":
        source, source_original_shape = Source.from_image(source)
    else:
        raise Exception("Source type not valid", source_type)
    offset = 0
    bit_split = source[:offset + 4, -1:]
    bit_split = bit_split.squeeze()
    bit_split = "".join(str(num) for num in bit_split)
    bit_split = int(bit_split, 2)
    offset = offset + 4
    padding = source[offset:offset + 4, -1:]
    padding = padding.squeeze()
    padding = "".join([str(num) for num in padding])
    padding = int(padding, 2)
    offset = offset + 4
    message_length = source[offset:offset + 32, -1:]
    message_length = message_length.squeeze()
    message_length = "".join([str(num) for num in message_length])
    message_length = int(message_length, 2)
    offset = offset + 32
    message_type = source[offset:offset + 2, -1:]
    message_type = message_type.squeeze()
    message_type = "".join([str(num) for num in message_type])
    message_type = Steganography.inv_type_map[message_type]
    offset = offset + 2
    num_extras = source[offset:offset + 4, -1:]
    num_extras = num_extras.squeeze()
    num_extras = "".join([str(num) for num in num_extras])
    num_extras = int(num_extras, 2)
    offset = offset + 4
    extras = []
    for extra in range(num_extras):
        extra = source[offset:offset + 16, -1:]
        extra = extra.squeeze()
        extra = "".join([str(num) for num in extra])
        extras.append(int(extra, 2))
        offset = offset + 16
    message = source[offset:offset + message_length, -bit_split:]
    message = message.reshape((-1,))
    if padding:  # [:-0] would drop everything, so only strip when padding > 0
        message = message[:-padding]
    message = np.packbits(message)
    return message, message_type, extras
def make_constellation(m):
    """
    Create a constellation with m possible symbols where m must be a power
    of 2. Points are laid out in a cross grid.
    """
    # parenthesised so the check reads "not (an odd power of two greater
    # than 1)"; the original mixed `or`/`and` precedence let m == 1 through
    if not isinstance(m, int) or not (is_odd_power_of_two(m) and m > 1):
        raise ValueError("m must be an odd power of 2 integer.")
    # Each symbol holds k bits.
    k = int(log(m) / log(2))
    n = int(k / 2) + 1
    mn = k - n
    s = int(pow(2, mn - 1))
    # Determining how the constellation map should be built
    rect_map = []
    const_map = [0 + 0j] * m
    if k == 1:
        const_map.append(complex(1, 0))
        const_map.append(complex(-1, 0))
    elif k == 3:
        # Do rectangular constellation mapping first
        for i in range(2 * pow(2, s)):
            i_bin = convert_to_binary(i, s + 1)
            for j in range(pow(2, s)):
                j_bin = convert_to_binary(j, s)
                rect_map.append((i_bin, j_bin))
        # Make Complex Constellation using cross gray coding
        for x, y in rect_map:
            Irct, Qrct = rectG(x, y)
            z = np.packbits(np.append(y, x[::-1]), bitorder='little')[0]
            if Irct < 3:
                const_map[z] = complex(Irct, Qrct)
            else:
                Icr = -sign(Irct) * (4 - abs(Irct))
                Qcr = sign(Qrct) * (abs(Qrct) + 2)
                const_map[z] = complex(Icr, Qcr)
    else:
        # Do rectangular constellation mapping first
        for i in range(pow(2, n)):
            i_bin = convert_to_binary(i, n)
            for j in range(pow(2, mn)):
                j_bin = convert_to_binary(j, mn)
                rect_map.append((i_bin, j_bin))
        # Make Numpy Complex Constellation using cross gray coding
        for x, y in rect_map:
            Irct, Qrct = rectG(x, y)
            xy = x + y
            z = int("".join(str(w) for w in xy), 2)
            if abs(Irct) < (3 * s):
                const_map[z] = complex(Irct, Qrct)
            elif abs(Qrct) > s:
                Icr = sign(Irct) * (abs(Irct) - (2 * s))
                Qcr = sign(Qrct) * ((4 * s) - abs(Qrct))
                const_map[z] = complex(Icr, Qcr)
            else:
                Icr = sign(Irct) * ((4 * s) - abs(Irct))
                Qcr = sign(Qrct) * (abs(Qrct) + (2 * s))
                const_map[z] = complex(Icr, Qcr)
    return const_map
def __call__(self, roidb):
    fname, boxes, klass, is_crowd = roidb["file_name"], roidb["boxes"], \
        roidb["class"], roidb["is_crowd"]
    assert boxes.ndim == 2 and boxes.shape[1] == 4, boxes.shape
    boxes = np.copy(boxes)
    im = cv2.imread(fname, cv2.IMREAD_COLOR)
    assert im is not None, fname
    im = im.astype("float32")
    height, width = im.shape[:2]
    # assume floatbox as input
    assert boxes.dtype == np.float32, "Loader has to return float32 boxes!"
    if not self.cfg.DATA.ABSOLUTE_COORD:
        boxes[:, 0::2] *= width
        boxes[:, 1::2] *= height
    # augmentation:
    tfms = self.aug.get_transform(im)
    im = tfms.apply_image(im)
    points = box_to_point4(boxes)
    points = tfms.apply_coords(points)
    boxes = point4_to_box(points)
    if len(boxes):
        assert klass.max() <= self.cfg.DATA.NUM_CATEGORY, \
            "Invalid category {}!".format(klass.max())
        assert np.min(np_area(boxes)) > 0, "Some boxes have zero area!"
    ret = {"image": im}
    # Add rpn data to dataflow:
    try:
        if self.cfg.MODE_FPN:
            multilevel_anchor_inputs = self.get_multilevel_rpn_anchor_input(
                im, boxes, is_crowd)
            for i, (anchor_labels, anchor_boxes) in enumerate(multilevel_anchor_inputs):
                ret["anchor_labels_lvl{}".format(i + 2)] = anchor_labels
                ret["anchor_boxes_lvl{}".format(i + 2)] = anchor_boxes
        else:
            ret["anchor_labels"], ret["anchor_boxes"] = \
                self.get_rpn_anchor_input(im, boxes, is_crowd)
        boxes = boxes[is_crowd == 0]  # skip crowd boxes in training target
        klass = klass[is_crowd == 0]
        ret["gt_boxes"] = boxes
        ret["gt_labels"] = klass
    except MalformedData as e:
        log_once(
            "Input {} is filtered for training: {}".format(fname, str(e)),
            "warn")
        return None
    if self.cfg.MODE_MASK:
        # augmentation will modify the polys in-place
        segmentation = copy.deepcopy(roidb["segmentation"])
        segmentation = [
            segmentation[k] for k in range(len(segmentation))
            if not is_crowd[k]
        ]
        assert len(segmentation) == len(boxes)
        # Apply augmentation on polygon coordinates.
        # And produce one image-sized binary mask per box.
        masks = []
        width_height = np.asarray([width, height], dtype=np.float32)
        # pad to 8 in order to pack mask into bits
        gt_mask_width = int(np.ceil(im.shape[1] / 8.0) * 8)
        for polys in segmentation:
            if not self.cfg.DATA.ABSOLUTE_COORD:
                polys = [p * width_height for p in polys]
            polys = [tfms.apply_coords(p) for p in polys]
            masks.append(polygons_to_mask(polys, im.shape[0], gt_mask_width))
        if len(masks):
            masks = np.asarray(masks, dtype='uint8')  # values in {0, 1}
            masks = np.packbits(masks, axis=-1)
        else:  # no gt on the image
            masks = np.zeros((0, im.shape[0], gt_mask_width // 8), dtype='uint8')
        ret['gt_masks_packed'] = masks
        # from viz import draw_annotation, draw_mask
        # viz = draw_annotation(im, boxes, klass)
        # for mask in masks:
        #     viz = draw_mask(viz, mask)
        # tpviz.interactive_imshow(viz)
    return ret
def img_frombytes(data):
    # Pack each row of 0/1 values into bytes; PIL's 1-bit raw mode also
    # expects byte-aligned rows, so the per-row padding matches.
    size = data.shape[::-1]
    databytes = np.packbits(data, axis=1)
    return Image.frombytes(mode='1', size=size, data=databytes)
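# Usage sketch (assumes Pillow): save a 16x16 checkerboard as a 1-bit image.
import numpy as np
from PIL import Image

data = np.indices((16, 16)).sum(axis=0) % 2
img_frombytes(data).save('checkerboard.png')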
def pack_bits(arr: "np.ndarray", pad: bool = True) -> bytes:
    """Pack a binary :class:`numpy.ndarray` for use with *Pixel Data*.

    .. versionadded:: 1.2

    Should be used in conjunction with (0028,0100) *Bits Allocated* = 1.

    .. versionchanged:: 2.1

        Added the `pad` keyword parameter and changed to allow `arr` to be
        2 or 3D.

    Parameters
    ----------
    arr : numpy.ndarray
        The :class:`numpy.ndarray` containing 1-bit data as ints. `arr` must
        only contain integer values of 0 and 1 and must have an 'uint' or
        'int' :class:`numpy.dtype`. For the sake of efficiency it's
        recommended that the length of `arr` be a multiple of 8 (i.e. that
        any empty bit-padding to round out the byte has already been added).
        The input `arr` should either be shaped as (rows, columns) or
        (frames, rows, columns) or the equivalent 1D array used to ensure
        that the packed data is in the correct order.
    pad : bool, optional
        If ``True`` (default) then add a null byte to the end of the packed
        data to ensure even length, otherwise no padding will be added.

    Returns
    -------
    bytes
        The bit packed data.

    Raises
    ------
    ValueError
        If `arr` contains anything other than 0 or 1.

    References
    ----------
    DICOM Standard, Part 5,
    :dcm:`Section 8.1.1<part05/chapter_8.html#sect_8.1.1>` and
    :dcm:`Annex D<part05/chapter_D.html>`
    """
    if arr.shape == (0, ):
        return bytes()

    # Test array
    if not np.array_equal(arr, arr.astype(bool)):
        raise ValueError(
            "Only binary arrays (containing ones or zeroes) can be packed.")

    if len(arr.shape) > 1:
        arr = arr.ravel()

    # The array length must be a multiple of 8, pad the end
    if arr.shape[0] % 8:
        arr = np.append(arr, np.zeros(8 - arr.shape[0] % 8))

    # Reshape so each row is 8 bits
    arr = np.reshape(arr, (-1, 8))
    arr = np.fliplr(arr)
    arr = np.packbits(arr.astype('uint8'))

    packed: bytes = arr.tobytes()
    if pad:
        return packed + b'\x00' if len(packed) % 2 else packed

    return packed
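# The fliplr before np.packbits yields the LSB-first bit order DICOM expects;
# on NumPy >= 1.17 the same packing can be written with bitorder='little'.
# A small check (a sketch, reusing pack_bits from above):
import numpy as np

arr = np.array([1, 0, 0, 0, 0, 0, 0, 0], dtype='uint8')
assert pack_bits(arr, pad=False) == b'\x01'
assert np.packbits(arr, bitorder='little').tobytes() == b'\x01'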
def SimulateParityBits(snrArray, txBin, Npixels, modulatioInfo):
    nSNR = len(snrArray)
    rxIncorrect = True
    mod = 0
    if (modulatioInfo.get("mod") == "PSK"):
        mod = komm.PSKModulation(modulatioInfo.get("order"))
    if (modulatioInfo.get("mod") == 'QAM'):
        mod = komm.QAModulation(modulatioInfo.get("order"))  # add baseAmplitude
        print("Energy per symbol is: " + str(mod.energy_per_symbol))
        # Normalize energy per symbol
        baseAmplitude = 1 / (np.sqrt(mod.energy_per_symbol))
        print("New Base Amplitude is: " + str(baseAmplitude))
        mod = komm.QAModulation(modulatioInfo.get("order"), baseAmplitude)

    print("Modulation to be used:")
    print(str(modulatioInfo.get("order")) + " " + str(modulatioInfo.get("mod")))
    print("Bits Per Symbol: " + str(mod.bits_per_symbol))
    print("Energy Per Symbol: " + str(mod.energy_per_symbol))
    print("\n")

    print("Simulating ARQ based on parity bit check!")
    print("Adding Parity Bits!")
    # Add parity bits
    # For each pixel
    for i in range(Npixels):
        startIndex = i * 8
        # If the sum of on bits is not even
        if (((np.sum(txBin[startIndex:startIndex + 7])) % 2) != 0):
            # Change parity bit to 1
            txBin[(startIndex + 7)] = 1
        # The sum of on bits is even
        else:
            # Change parity bit to 0
            txBin[(startIndex + 7)] = 0

    # Modulate data
    txDataParity = mod.modulate(txBin)

    print("Simulating Transmission!")
    indexFactor = int(8 / mod.bits_per_symbol)
    berArray = np.empty(0)
    arqArray = np.empty(0)
    for c in range(nSNR):
        print("Simulating SNR: " + str(snrArray[c]))
        # Set additive white Gaussian noise to reflect the new SNR
        awgn = komm.AWGNChannel(snr=10**(snrArray[c] / 10.))
        ARQ = 0
        # Reset the decoded bit buffer for this SNR
        rxBinDecoded = np.empty(0)
        # For each codeword (one pixel)
        for i in range(Npixels):
            # Compute index of the codeword
            startIndex = i * indexFactor
            # Until the parity bit check is passed
            while (rxIncorrect):
                # Simulate noise in the channel during transmission only
                rxData = awgn(txDataParity[startIndex:startIndex + indexFactor])
                # Demodulate data
                rxBin = mod.demodulate(rxData)
                # Check if parity = 0
                if ((np.sum(rxBin) % 2) != 0):
                    # Error during transmission: increment request counter
                    ARQ += 1
                else:
                    # Passed parity check, assume data is correct
                    # Append data bits to final binary array
                    rxBinDecoded = np.append(rxBinDecoded, rxBin)
                    # This codeword has been received without error
                    rxIncorrect = False
            # Set while loop flag to true to process the next codeword
            rxIncorrect = True

        # Convert to real int
        rxBinDecoded = np.real(rxBinDecoded)
        rxBinDecoded = rxBinDecoded.astype(int)

        # For the first SNR value, plot the received image
        # (tx_im is assumed to be the source PIL image in the enclosing scope)
        if (c == 0):
            # Plot few rx bits
            # plt.figure()
            # plt.axes().set_aspect("equal")
            # plt.scatter(rxBinDecoded[:10000].real, rxBinDecoded[:10000].imag, s=1, marker=".")
            # plt.show()
            rx_im = np.packbits(rxBinDecoded).reshape(tx_im.size[1], tx_im.size[0])
            plt.figure()
            plt.imshow(np.array(rx_im), cmap="gray", vmin=0, vmax=255)
            plt.show()

        # Count bit errors
        print("Computing BER: " + str(snrArray[c]))
        BitErrorCount = 0
        # For each bit in the rx data
        for i in range(Npixels * 8):
            # If the bit value does not match
            if (rxBinDecoded[i] != txBin[i]):
                # Increment error count
                BitErrorCount += 1
        # Calculate bit error rate for the transmission
        berArray = np.append(berArray, (BitErrorCount / (Npixels * 8)))
        arqArray = np.append(arqArray, (ARQ / (Npixels * 8)))

    print("BER Array:")
    print(berArray)
    print("\n")
    print("ARQ Array:")
    print(arqArray)
    print("\n")

    plt.figure()
    plt.scatter(snrArray, berArray)  # plot points
    plt.plot(snrArray, berArray)  # plot lines
    plt.yscale("log")
    plt.ylabel('$BER$')
    plt.xlabel('$SNR$')
    plt.title((str(modulatioInfo.get("order")) + " " +
               str(modulatioInfo.get("mod")) + " BER"))
    plt.grid(True)

    # Calculate theoretical BER
    # k parameter is the bits per symbol
    k = mod.bits_per_symbol
    errfcDataSet = np.empty(0)
    # For each SNR
    for i in range(nSNR):
        # Calculate theoretical BER
        errfc = 0.5 * scipy.special.erfc(math.sqrt((10**(snrArray[i] / 10)) / k))
        errfcDataSet = np.append(errfcDataSet, errfc)
    plt.plot(snrArray, errfcDataSet, color='r')
    plt.show()

    plt.figure()
    plt.scatter(snrArray, arqArray)  # plot points
    plt.plot(snrArray, arqArray)  # plot lines
    plt.yscale("log")
    plt.ylabel('$ARQ Rate$')
    plt.xlabel('$SNR$')
    plt.title((str(modulatioInfo.get("order")) + " " +
               str(modulatioInfo.get("mod")) + " ARQ/nBits"))
    plt.grid(True)
    return berArray, arqArray, rxBinDecoded
def get_action(logits):
    # Bernoulli-sample one bit per logit
    probs = 1. / (1. + np.exp(-logits.detach().numpy()))
    bits = np.array([np.random.uniform() <= p for p in probs]).astype(int)
    # np.packbits zero-pads on the right; >> 2 drops the pad bits
    # (this assumes six logits, i.e. a 6-bit action space)
    return int(np.packbits(bits)[0] >> 2)
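# np.packbits is MSB-first and zero-pads on the right, which is why the
# shift is needed; e.g. with six sampled bits:
import numpy as np

bits = np.array([1, 0, 1, 1, 0, 1])
assert np.packbits(bits)[0] == 0b10110100  # right-padded to 8 bits
assert np.packbits(bits)[0] >> 2 == 0b101101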
def bwr_bytes(image):
    """Converts the image to the closest 2-bit black, white, or red bytes."""
    indices = _color_indices(image)
    bwr_image_data = BWR_2_BIT[indices.reshape((image.height * image.width))]
    return packbits(bwr_image_data)
    return fc2

# prepare numpy arrays for testing
data = np.load("data/bnn-5775.data.npz")
images = data["images"][:test_size]
labels = data["labels"][:test_size]
num_images = images.shape[0]
params = np.load("data/bnn-5775.params.npz")

# prepare packed arrays
packed_params = {}
for name in params:
    if "w_fc" in name:
        packed_params[name] = np.packbits(
            params[name].copy().astype(bool),
            axis=1, bitorder="little").view(np.uint32)
    elif "w_conv1" in name:
        arr = params[name].copy().transpose(0, 2, 3, 1).astype(bool)
        packed_params[name] = arr
    elif "w_conv2" in name:
        arr = params[name].copy().transpose(0, 2, 3, 1)
        arr = np.packbits(arr.astype(bool), axis=3,
                          bitorder="little").view(np.uint16)
        packed_params[name] = arr
    elif "bn_t" in name:
        packed_params[name] = params[name].copy().transpose(1, 2, 0)
    else:
        packed_params[name] = params[name].copy()
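# The .view(np.uint32) trick only works when the packed axis yields a byte
# count divisible by 4. A standalone sketch with an illustrative 8x64 binary
# weight matrix:
import numpy as np

w = np.random.rand(8, 64) > 0.5
packed = np.packbits(w, axis=1, bitorder="little")  # (8, 8) uint8
words = packed.view(np.uint32)                      # (8, 2), 32 weights per word
assert words.shape == (8, 2)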
def Extract(self, features, num_features_per_region=None):
    """Extracts aggregated representation.

    Args:
      features: [N, D] float numpy array with N local feature descriptors.
      num_features_per_region: Required only if computing regional aggregated
        representations, otherwise optional. List of number of features per
        region, such that sum(num_features_per_region) = N. It indicates
        which features correspond to each region.

    Returns:
      aggregated_descriptors: 1-D numpy array.
      feature_visual_words: Used only for ASMK/ASMK* aggregation type. 1-D
        numpy array denoting visual words corresponding to the
        `aggregated_descriptors`.

    Raises:
      ValueError: If inputs are misconfigured.
    """
    features = tf.cast(features, dtype=tf.float32)

    if num_features_per_region is None:
        # Use dummy value since it is unused.
        num_features_per_region = []
    else:
        num_features_per_region = tf.cast(num_features_per_region,
                                          dtype=tf.int32)
        if len(num_features_per_region
              ) and sum(num_features_per_region) != features.shape[0]:
            raise ValueError(
                "Incorrect arguments: sum(num_features_per_region) and "
                "features.shape[0] are different: %d vs %d" %
                (sum(num_features_per_region), features.shape[0]))

    # Extract features based on desired options.
    if self._aggregation_type == _VLAD:
        # Feature visual words are unused in the case of VLAD, so just return
        # dummy constant.
        feature_visual_words = tf.constant(-1, dtype=tf.int32)
        if self._use_regional_aggregation:
            aggregated_descriptors = self._ComputeRvlad(
                features,
                num_features_per_region,
                self._codebook,
                use_l2_normalization=self._use_l2_normalization,
                num_assignments=self._num_assignments)
        else:
            aggregated_descriptors = self._ComputeVlad(
                features,
                self._codebook,
                use_l2_normalization=self._use_l2_normalization,
                num_assignments=self._num_assignments)
    elif (self._aggregation_type == _ASMK or
          self._aggregation_type == _ASMK_STAR):
        if self._use_regional_aggregation:
            (aggregated_descriptors, feature_visual_words) = self._ComputeRasmk(
                features,
                num_features_per_region,
                self._codebook,
                num_assignments=self._num_assignments)
        else:
            (aggregated_descriptors, feature_visual_words) = self._ComputeAsmk(
                features,
                self._codebook,
                num_assignments=self._num_assignments)

    feature_visual_words_output = feature_visual_words.numpy()

    # If using ASMK*/RASMK*, binarize the aggregated descriptors.
    if self._aggregation_type == _ASMK_STAR:
        reshaped_aggregated_descriptors = np.reshape(
            aggregated_descriptors, [-1, self._feature_dimensionality])
        packed_descriptors = np.packbits(
            reshaped_aggregated_descriptors > 0, axis=1)
        aggregated_descriptors_output = np.reshape(packed_descriptors, [-1])
    else:
        aggregated_descriptors_output = aggregated_descriptors.numpy()

    return aggregated_descriptors_output, feature_visual_words_output
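# The ASMK* branch reduces each float descriptor to a binary signature,
# packing 8 sign bits per output byte; a minimal illustration assuming
# 32-D descriptors:
import numpy as np

descriptors = np.random.randn(5, 32)
packed = np.packbits(descriptors > 0, axis=1)  # (5, 4): 32 sign bits -> 4 bytes
assert packed.shape == (5, 4)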
def binArr2int(arr):
    """ Convert a binary array into its (long) integer representation. """
    from numpy import packbits
    tmp2 = packbits(arr.astype(int))
    return sum(val * 256 ** i for i, val in enumerate(tmp2[::-1]))
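# np.packbits zero-pads on the right, so binArr2int matches the usual binary
# reading only when len(arr) is a multiple of 8; quick checks:
import numpy as np

assert binArr2int(np.array([0, 0, 0, 0, 0, 1, 0, 1])) == 5  # 0b00000101
assert binArr2int(np.array([1] * 9)) == 0b1111111110000000  # padded to 16 bits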
def dba_loop(s, c=None, max_it=10, thr=0.001, mask=None, keep_averages=False,
             use_c=False, nb_initial_samples=None, nb_prob_samples=None, **kwargs):
    """Loop around the DTW Barycenter Averaging (DBA) method until convergence.

    :param s: Container of sequences
    :param c: Initial averaging sequence. If none is given, the first one is used
        (unless if nb_initial_samples is set).
        Better performance can be achieved by starting from an informed
        starting point (Petitjean et al. 2011).
    :param max_it: Maximal number of calls to DBA.
    :param thr: Convergence if the DBA is changing less than this value.
    :param mask: Boolean array with the series in s to use. If None, use all.
    :param keep_averages: Keep all DBA values (for visualisation or debugging).
    :param nb_initial_samples: If c is None, and this argument is not None, select
        nb_initial_samples samples and select the series closest to all other
        samples as c.
    :param nb_prob_samples: Probabilistically sample the best path instead of the
        deterministic version.
    :param use_c: Use a fast C implementation instead of a Python version.
    :param kwargs: Arguments for dtw.distance
    """
    if np is None:
        raise NumpyException('The method dba_loop requires Numpy to be available')
    s = SeriesContainer.wrap(s)
    ndim = s.detected_ndim
    avg = None
    avgs = None
    if keep_averages:
        avgs = []
    if mask is None:
        mask = np.full((len(s),), True, dtype=bool)
    if nb_prob_samples is None:
        nb_prob_samples = 0
    if c is None:
        if nb_initial_samples is None:
            curi = 0
            while not mask[curi]:
                curi += 1
            c = s[curi]
        else:
            c = get_good_c(s, mask, nb_initial_samples, use_c=use_c, **kwargs)

        # You can also use a constant function, but this gives worse performance.
        # After the first iteration, this will be the average of all
        # sequences. The disadvantage is that this might create e.g. multiple
        # peaks for a sequence with only one peak (but shifted) and then the
        # original sequences will map their single peak to the different peaks
        # in the first average and converge to that as a local optimum.
        # t = s.get_avg_length()
        # c = array.array('d', [0] * t)

    if use_c:
        if np is not None and isinstance(mask, np.ndarray):
            # The C code requires a bit array of uint8 (or unsigned char)
            mask_copy = np.packbits(mask, bitorder='little')
        else:
            raise Exception('Mask only implemented for C when passing a Numpy array. '
                            f'Got {type(mask)}')
    else:
        mask_copy = mask

    if not use_c and nb_prob_samples != 0:
        raise Exception('The parameter nb_prob_samples is not available in the '
                        'Python implementation!')

    for it in range(max_it):
        logger.debug(f'DBA Iteration {it}')
        if use_c:
            assert (c is not None)
            c_copy = c.copy()  # The C code reuses this array
            # c_copy = c.flatten()
            if ndim == 1:
                dtw_cc.dba(s, c_copy, mask=mask_copy,
                           nb_prob_samples=nb_prob_samples, **kwargs)
                # avg = c_copy
            else:
                dtw_cc.dba_ndim(s, c_copy, mask=mask_copy,
                                nb_prob_samples=nb_prob_samples, ndim=ndim, **kwargs)
                # avg = c_copy.reshape(-1, ndim)
            avg = c_copy
        else:
            if not nb_prob_samples:
                avg = dba(s, c, mask=mask, use_c=use_c, **kwargs)
            else:
                avg = dba(s, c, mask=mask, nb_prob_samples=nb_prob_samples,
                          use_c=use_c, **kwargs)
        if keep_averages:
            avgs.append(avg)
        if thr is not None and c is not None:
            diff = 0
            # diff = np.sum(np.subtract(avg, c))
            if ndim == 1:
                for av, cv in zip(avg, c):
                    diff += abs(av - cv)
            else:
                for av, cv in zip(avg, c):
                    diff += max(abs(av[d] - cv[d]) for d in range(ndim))
            diff /= len(avg)
            if diff <= thr:
                logger.debug(f'DBA converged at {it} iterations (avg diff={diff}).')
                break
        c = avg
    if keep_averages:
        return avg, avgs
    return avg
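# The mask handed to the C code is packed LSB-first, so series i maps to bit
# (i % 8) of byte (i // 8); a small check:
import numpy as np

mask = np.zeros(9, dtype=bool)
mask[[0, 2, 8]] = True
packed = np.packbits(mask, bitorder='little')
assert packed.tolist() == [0b00000101, 0b00000001]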
def fast_show(self, style):
    region = self.buf

    if self.v_flip:
        region = numpy.fliplr(region)
    if self.h_flip:
        region = numpy.flipud(region)
    if self.rotation:
        region = numpy.rot90(region, self.rotation // 90)

    buf_a = numpy.packbits(numpy.where(region == BLACK, 0, 1)).tolist()
    buf_b = self.buf_black

    self.setup()

    packed_height = list(struct.pack('<H', self.rows))
    if isinstance(packed_height[0], str):
        packed_height = map(ord, packed_height)

    self._send_command(0x74, 0x54)  # Set Analog Block Control
    self._send_command(0x7e, 0x3b)  # Set Digital Block Control
    self._send_command(0x01, packed_height + [0x00])  # Gate setting
    self._send_command(0x03, [0b10000, 0b0001])  # Gate Driving Voltage
    self._send_command(0x3a, 0x07)  # Dummy line period
    self._send_command(0x3b, 0x04)  # Gate line width
    self._send_command(0x11, 0x03)  # Data entry mode setting 0x03 = X/Y increment
    self._send_command(0x04)  # Power On
    self._send_command(0x2c, 0x3c)  # VCOM Register, 0x3c = -1.5v?

    # border colour
    self._send_command(0x3c, 0x00)
    if self.border_colour == self.BLACK:
        self._send_command(0x3c, 0x00)
    elif self.border_colour == self.WHITE:
        self._send_command(0x3c, 0xFF)

    self._send_command(0x44, [0x00, (self.cols // 8) - 1])  # Set RAM X Start/End
    self._send_command(0x45, [0x00, 0x00] + packed_height)  # Set RAM Y Start/End
    self._send_command(0x4e, 0x00)  # Set RAM X Pointer Start
    self._send_command(0x4f, [0x00, 0x00])  # Set RAM Y Pointer Start
    self._send_command(0x24, buf_a)

    if style == self.BLACK:
        self._send_command(0x32, self._luts["clear-black"])  # Set LUTs
        self._send_command(0x22, 0xc7)  # Display Update Sequence
        self._send_command(0x20)  # Trigger Display Update
        # time.sleep(SLEEP_TIME)
        self._busy_wait()
        self._send_command(0x32, self._luts["draw-from-black"])  # Set LUTs
        self._send_command(0x22, 0xc7)  # Display Update Sequence
        self._send_command(0x20)  # Trigger Display Update
    else:
        self._send_command(0x3c, 0x00)
        self._send_command(0x32, self._luts["clear-white"])  # Set LUTs
        self._send_command(0x22, 0xc7)  # Display Update Sequence
        self._send_command(0x20)  # Trigger Display Update
        # time.sleep(SLEEP_TIME)
        self._busy_wait()
        self._send_command(0x3c, 0xFF)
        self._send_command(0x32, self._luts["draw-from-white"])  # Set LUTs
        self._send_command(0x22, 0xc7)  # Display Update Sequence
        self._send_command(0x20)  # Trigger Display Update

    time.sleep(0.05)
    self._busy_wait()
    self._send_command(0x10, 0x01)  # Enter Deep Sleep
def makeBlockMapByte(self, sizeBlock):
    arr = numpy.packbits(self.blockMapArr)
    return bytearray(arr)
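# Round-trip sketch: the packed block map can be restored with
# numpy.unpackbits, where count trims the right-padding back off:
import numpy as np

block_map = np.array([1, 0, 1, 1, 0], dtype=np.uint8)
packed = bytearray(np.packbits(block_map))  # bytearray(b'\xb0')
restored = np.unpackbits(np.frombuffer(packed, dtype=np.uint8), count=5)
assert restored.tolist() == [1, 0, 1, 1, 0]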