def image2vec_direct(self, dgrad):
    """Encode every valid pixel of `dgrad` as a binary hypervector and
    bundle them all with a consensus sum.

    A pixel is skipped when either of its first two channels is zero.
    Each channel value is thermometer-encoded (value/5 low bits set),
    channel 0 is permuted along 'x' and channel 1 along 'y', and the
    pixel vector is the XOR of the two permuted encodings.
    """
    chunk_size = 500
    encoded = []
    for row in range(dgrad.shape[0]):
        for col in range(dgrad.shape[1]):
            if dgrad[row][col][0] == 0 or dgrad[row][col][1] == 0:
                continue
            # channel 0: set the low bits, then apply the 'x' permutation
            v0 = pyhdc.LBV()
            bits0 = self.num2vec(dgrad[row][col][0] / 5, chunk_size)
            for b in range(bits0):
                v0.flip(b)
            v0.permute('x', 0)
            # channel 1: same encoding, 'y' permutation instead
            v1 = pyhdc.LBV()
            bits1 = self.num2vec(dgrad[row][col][1] / 5, chunk_size)
            for b in range(bits1):
                v1.flip(b)
            v1.permute('y', 0)
            # pixel vector = XOR(P_x(v0), P_y(v1))
            pix = pyhdc.LBV()
            pix.xor(v0)
            pix.xor(v1)
            encoded.append(pix)
    # consensus sum (bitwise majority) over all per-pixel vectors
    return pyhdc.csum(encoded, '1')
def i2v_baseline(image, vmap):
    """Reference image-to-vector encoder.

    For every pixel with value >= 1, copy the codebook vector
    vmap[value - 1], tag it with the pixel position via 'x'/'y'
    permutations, and XOR it into the accumulator.

    Returns (accumulator vector, number of encoded pixels).
    """
    used = 0
    acc = pyhdc.LBV()
    for r in range(image.shape[0]):
        for c in range(image.shape[1]):
            val = image[r, c]
            if val < 1:
                continue
            used += 1
            # copy the codebook vector for this intensity
            pix = pyhdc.LBV()
            pix.xor(vmap[val - 1])
            # encode position: permute by (row-1) on 'x', (col-1) on 'y'
            if r > 0:
                pix.permute('x', r - 1)
            if c > 0:
                pix.permute('y', c - 1)
            acc.xor(pix)
    return acc, used
def image2vec(self, dgrad=None): ret = pyhdc.LBV() #params = [self.z_err / 2] #params = [self.x_err2, self.y_err2, self.x_err, self.y_err] params = [ self.x_err2, self.y_err2, self.x_err, self.y_err, self.z_err, self.z_err2 ] #params = [self.x_err2, self.y_err2, self.x_err, self.y_err, self.z_err, self.z_err2, self.e_count] #params = [self.x_err2, self.y_err2, self.z_err2] #params = [self.x_err, self.y_err, self.z_err, self.g_count] #params = [self.x_err, self.y_err, self.z_err, self.x_err, self.y_err, self.z_err, self.e_count, self.p_count, self.n_count, self.g_count] #step = 50 #for i in range(self.dvs_img.shape[0] // step): # for j in range(self.dvs_img.shape[1] // step): # dvs_img_ = np.copy(self.dvs_img[i:i+step,j:j+step,:]) # dgrad_ = np.zeros((dvs_img_.shape[0], dvs_img_.shape[1], 2), dtype=np.float32) # x_err, y_err, yaw_err, z_err, e_count, nz_avg = \ # pydvs.dvs_flow_err(dvs_img_, dgrad_) # # if (e_count < 20): # x_err = np.nan # y_err = np.nan # z_err = np.nan # e_count /= (6250 / (self.dvs_img.shape[0] * self.dvs_img.shape[1])) * step * step # e_count -= 1 #params.append(x_err / 5) #params.append(y_err / 5) #params.append(z_err / 500) #params.append(e_count) #print (self.dvs_img.shape) #print (params) #print (len(params)) chunk_size = 500 to_encode = [self.num2vec(p, chunk_size) for p in params] scale = 2 #ret.rand() for i, n_bits in enumerate(to_encode): start_offset = i * chunk_size * scale end_chunk = (i + 1) * chunk_size * scale if (np.isnan(params[i])): continue #for j in range(start_offset, n_bits + start_offset): # ret.flip(j) for j in range(start_offset, end_chunk): if (j <= n_bits + start_offset and not ret.get_bit(j)): ret.flip(j) if (j > n_bits + start_offset and ret.get_bit(j)): ret.flip(j) return ret
def window_stack(image, vmap, size, stride):
    """Slide a size x size window over `image` with the given stride,
    encode each window with img2vec, and combine all window vectors by
    bitwise majority vote.

    If the image dimensions do not fit the stride exactly, the image is
    zero-padded so the window grid covers it.
    """
    vectors = []
    if not (image.shape[0] - size) % stride == 0 or not (image.shape[1] - size) % stride == 0:
        # zero-pad so both dimensions become a multiple of the stride grid
        x = numpy.zeros(
            (int((math.ceil((image.shape[0] - size) / stride) + int(math.ceil(size / stride))) * stride),
             int((math.ceil((image.shape[1] - size) / stride) + int(math.ceil(size / stride))) * stride),
             image.shape[2]),
            dtype=int)
        x[:image.shape[0], :image.shape[1], :] = image
        image = x
        # print('New image size: ' + str(image.shape))
    # print('Image size: ' + str(image.shape))
    row = 0
    while row + size < image.shape[0]:
        # print('\trow: ' + str(row))
        col = 0
        while col + size < image.shape[1]:
            # print('\t\tcol: ' + str(col))
            # encode the current window into a hypervector
            window = img2vec.img2vec(image[row:row + size, col:col + size, :], vmap)
            vectors.append(window)
            col += stride
        row += stride
    # print('DONE---')
    ret = pyhdc.LBV()
    if len(vectors) == 1:
        # single window: just copy it
        ret.xor(vectors[0])
        return ret
    # bitwise majority vote across all window vectors
    th = len(vectors) // 2
    for i in range(pyhdc.get_vector_width()):
        cnt = 0
        for v in vectors:
            if v.get_bit(i):
                cnt += 1
        if cnt >= th:
            ret.flip(i)
    return ret
def bind(vectors, basis_vectors):
    """Bind each vector with its positional basis vector (XOR) and
    bundle the bound pairs with a consensus sum."""
    def _xor_copy(a, b):
        # fresh vector holding a XOR b (neither input is mutated)
        out = pyhdc.LBV()
        out.xor(a)
        out.xor(b)
        return out

    bound = [_xor_copy(vec, basis_vectors[idx]) for idx, vec in enumerate(vectors)]
    return csum(bound)
def csum(vectors):
    """Consensus sum: bitwise majority vote over a list of vectors.

    A bit is set in the result when at least half of the input vectors
    have it set (threshold = len(vectors) // 2, with >=).
    """
    threshold = len(vectors) // 2
    result = pyhdc.LBV()
    width = pyhdc.get_vector_width()
    for bit in range(width):
        votes = sum(1 for vec in vectors if vec.get_bit(bit))
        if votes >= threshold:
            result.flip(bit)
    return result
def vmap2images(vmap, scale=9):
    """Render the codebook vectors of `vmap` as images for visual
    inspection and write them as PNG frames.

    For each vector (indices 1..254), three panels are built:
    the vector itself, its XOR with the previous vector (difference,
    red channel of the left panel), and its XOR with vmap[0] (red
    channel of the right panel).

    Fix: removed the unused local `z` (dead np.zeros allocation left
    over from debugging).
    """
    shape = (90, 90)
    for i, v in enumerate(vmap):
        if (i == 0) or (i > 254):
            continue
        img = vec_visual(v, shape, scale)
        name = "frame_" + str(i - 1).rjust(4, '0') + ".png"
        img = np.dstack((img, img, img))
        #cv2.imwrite("/home/ncos/Desktop/vmap_viz/" + name, img)
        #continue
        # difference against the first codebook vector
        v_0 = pyhdc.LBV()
        v_0.xor(vmap[0])
        v_0.xor(v)
        v_0_img = vec_visual(v_0, shape, scale)
        img_r = np.copy(img)
        img_r[:,:,2] = v_0_img
        # difference against the previous codebook vector
        # NOTE: i > 0 always holds here (i == 0 is skipped above)
        if (i > 0):
            v_prev_ = vmap[i - 1]
            v_prev = pyhdc.LBV()
            v_prev.xor(v_prev_)
            v_prev.xor(v)
            img_diff = vec_visual(v_prev, shape, scale)
            img[:,:,2] = img_diff
        # place the two panels side by side with a black separator
        sep = np.zeros((shape[0] * scale, 10, 3), dtype=np.uint8)
        img = np.hstack((img, sep, img_r))
        cv2.imwrite("/home/ncos/Desktop/vmap_viz/" + name, img)
def test_bitmanip(v_):
    """Self-test for LBV bit manipulation: bit reads, bit flips, and
    bit counting. Prints progress and returns True only if all three
    sub-tests pass. `v_` is not mutated (a copy is used throughout).
    """
    v = pyhdc.LBV()
    v.xor(v_)  # copy v_ to v
    # Test 1: get_bit must agree with the string representation
    print("Read bit test")
    ref_str = str(v)
    print(ref_str)
    test_str = ""
    for i in range(pyhdc.get_vector_width()):
        if (v.get_bit(i)):
            test_str += '1'
        else:
            test_str += '0'
        if ((i + 1) % 32 == 0):
            test_str += '_'  # str(v) groups bits in 32-bit words
    print(test_str)
    t1_result = (test_str == ref_str)
    print("Passed:", t1_result)
    print()
    # Test 2: flipping bits twice must restore the vector; clearing
    # every set bit must yield the zero vector
    print("Flip bit test")
    print(v)
    for i in range(pyhdc.get_vector_width() // 2):
        v.flip(i * 2)
    print(v)
    for i in range(pyhdc.get_vector_width() // 2):
        v.flip(i * 2)
    print(v)
    for i in range(pyhdc.get_vector_width()):
        if (v.get_bit(i)):
            v.flip(i)
    print(v)
    t2_result = v.is_zero()
    print("Passed:", t2_result)
    print()
    # Test 3: count() must match the number of randomly set bits
    # (v is zero at this point thanks to test 2)
    print("Count bit test")
    nbits = 0
    for i in range(pyhdc.get_vector_width()):
        choice = random.choice([True, False])
        if (choice):
            v.flip(i)
            nbits += 1
    test_nbits = v.count()
    print(v)
    t3_result = (nbits == test_nbits)
    print("Passed:", t3_result, "true value = ", nbits, "result = ", test_nbits)
    return t1_result & t2_result & t3_result
def test_permute(v_, axis, order, times):
    """Check that inv_permute is the exact inverse of permute.

    Applies the permutation (axis, order) `times` times, then the
    inverse the same number of times, and verifies the vector is back
    to the original. Prints intermediate states; returns True on pass.
    """
    v = pyhdc.LBV()
    v.xor(v_)  # work on a copy, leave v_ untouched
    print("v permuted with P" + axis + str(order), times, "time(s)")
    print(v)
    for _ in range(times):
        v.permute(axis, order)
    print(v)
    for _ in range(times):
        v.inv_permute(axis, order)
    print(v)
    # XOR with the original: zero iff the round trip restored it
    v.xor(v_)
    print("Passed:", v.is_zero())
    print()
    return v.is_zero()
def create_memory(X, seq_len, basis_vectors):
    """Build a sequence memory by binding consecutive chunks of X.

    Each chunk of `seq_len` vectors is bound with the basis vectors;
    the memory entry and its unbinding with basis_vectors[0] (the
    recovered "image" part) are stored.

    NOTE: the loop exits after the first chunk via `break` — only one
    memory entry is ever produced (looks like debugging leftover, kept
    to preserve behavior).
    """
    memory = []
    memory_image = []
    for start in range(0, len(X) - seq_len, seq_len):
        chunk = []
        for pos in range(start, start + seq_len):
            chunk.append(X[pos])
        entry = bind(chunk, basis_vectors)
        memory.append(entry)
        # unbind with the first basis vector to recover the image part
        recovered = pyhdc.LBV()
        recovered.xor(entry)
        recovered.xor(basis_vectors[0])
        memory_image.append(recovered)
        break
    return memory, memory_image
def np_vec2c_vec(c_vec):
    """Convert a sequence of '0'/'1' characters into an LBV by flipping
    the bit at every position holding '1'.

    Prints diagnostics if the fresh vector is nonzero or if the final
    bit count disagrees with the number of '1's seen.
    """
    x = pyhdc.LBV()
    if (not x.is_zero()):
        print("ERROR! - starting with a nonzero vector")
    expected_bits = 0
    for pos, ch in enumerate(c_vec):
        if (ch == '1'):
            expected_bits += 1
            x.flip(pos)
    # sanity check: the vector's popcount must match what we set
    nbits = x.count()
    if (expected_bits != nbits):
        print("Bitcount mismatch!", nbits, expected_bits)
    return x
def i2v_fast(image, vmap):
    """Fast image-to-vector encoder.

    Unlike i2v_baseline, the ACCUMULATOR itself is permuted as the scan
    advances (and the 'y' permutation is applied only on nonzero
    pixels) rather than permuting each pixel vector — presumably an
    intentional speed/semantics trade-off of this variant; confirm
    against i2v_baseline if exact equivalence is expected.

    Returns (accumulator vector, number of encoded pixels).
    """
    count = 0
    acc = pyhdc.LBV()
    for r in range(image.shape[0]):
        if r > 0:
            acc.permute('x', r - 1)
        for c in range(image.shape[1]):
            val = image[r, c]
            if val < 1:
                continue
            count += 1
            if c > 0:
                acc.permute('y', c - 1)
            acc.xor(vmap[val - 1])
    return acc, count
def memscore(v_img, M, M_img, seq_len, basis_vectors):
    """Return the minimum Hamming distance between `v_img` and the
    unbound memory entries `M_img`.

    `seq_len` and `basis_vectors` are unused here; they are kept for
    interface compatibility (they were only referenced by the
    commented-out per-basis diagnostics).

    Fix: removed dead computations left from debugging — `h0`,
    `count_raw`, `count_ub`, and the throwaway XOR vector `test` were
    computed but never used (only the commented-out prints read them).

    Raises ValueError if M is empty (min of an empty sequence), as the
    original did.
    """
    distances = [safe_hamming(M_img[i], v_img) for i in range(len(M))]
    #s = str(i).rjust(4) + str(h1).rjust(5) + "|"
    #for i in range(0, seq_len):
    #    h = safe_hamming(test, basis_vectors[i]) # should become small?
    #    s += str(h).rjust(5)
    #print (s)
    return min(distances)
def num2vec(self, i, num):
    """Return a fresh copy of the codebook vector self.vmaps[i][num]
    (copied via XOR into a zero vector so the codebook entry is never
    mutated by the caller)."""
    out = pyhdc.LBV()
    out.xor(self.vmaps[i][num])
    return out
print("Max order on y:", pyhdc.get_max_order('y'))
print("Vector width:", pyhdc.get_vector_width(), "bits")
print()

# Codebook geometry: each codebook vector is a contiguous patch of set
# bits; neighbouring codes are shifted by `stride` bits, so consecutive
# codes differ in 2 * stride bits (their Hamming distance).
# NOTE(review): `args` and `v_len` are defined earlier in this script
# (outside this view).
patch_size = int(args.fill_rate * v_len)
stride = int(v_len / (4 * args.size))
z_fill = int(v_len / 4)
print("patch_size:", patch_size)
print("stride:", stride, "; hamming =", 2 * stride)
print("z-fill:", z_fill)
print()

# Build the codebook: one shifted patch vector per intensity level
vmap = []
for i in range(args.size):
    v = pyhdc.LBV()
    start_bit = stride * i
    # clamp the patch so it never runs past the vector width
    end_bit = min(start_bit + patch_size, v_len - 1)
    for bit_idx in range(start_bit, end_bit + 1):
        v.flip(bit_idx)
    vmap.append(v)

# 'gravity' vector: alternating stride-wide bands of set bits
v = pyhdc.LBV()
for i in range(0, v_len - stride, 2 * stride):
    for bit_idx in range(i, i + stride):
        v.flip(bit_idx)
vmap.append(v)

# permute the whole thing (permute() defined elsewhere in this script)
permute(vmap, 50)
#!/usr/bin/python3 import pyhdc import time, random print("Max order on x:", pyhdc.get_max_order('x')) print("Max order on y:", pyhdc.get_max_order('y')) print("Vector width:", pyhdc.get_vector_width(), "bits") print() a = pyhdc.LBV() a.rand() # Get a random vector print("Vector a:") print(a) print() # There are 'x' and 'y' permutations; the seed (first one) is generated separately # and independently for both; the second value is 'order' - how many times the # seed permutation should be permuted with itself. All permutations are precomputed, so # only up to 'pyhdc.get_max_order' orders can be used. print("P0:") print(pyhdc.permutation_to_str('x', 0)) print() print("P1:") print(pyhdc.permutation_to_str('x', 1)) print() def test_permute(v_, axis, order, times): v = pyhdc.LBV()
def safeHamming(v1, v2):
    """Hamming distance between v1 and v2, computed on a scratch vector
    so neither input is mutated (LBV.xor works in place)."""
    diff = pyhdc.LBV()
    diff.xor(v1)
    diff.xor(v2)  # diff now holds v1 XOR v2
    return diff.count()
#v = pyhdc.LBV() #v.rand() vmap.append(v) f.close() print("Read", len(vmap), "vectors") print() # sanity check check_vmap(vmap) # convert to vectors print("Processing", len(X), "images") f = open(os.path.join(args.base_dir, 'im2vec.txt'), 'w') ref_vec = pyhdc.LBV() for i, img_name in enumerate(X): v = img2vec(cv2.imread(img_name, cv2.IMREAD_UNCHANGED), vmap) s = v.__repr__() for vel in y[i]: s += " " + str(vel) f.write(s + "\n") # ====== if (i > 0): ref_vec.xor(v) hamming = ref_vec.count() print("\t\tHamming = ", hamming) ref_vec.xor(ref_vec)
# Load the EV-IMO datasets into (X, y) train and (X_val, y_val)
# validation splits. get_X_y is defined earlier in this script.
get_X_y("/home/ncos/raid/EV-IMO/SET4/O1O2O3_PLAIN_WALL_II", X, y, X_val, y_val)
get_X_y("/home/ncos/raid/EV-IMO/SET4/O1O2O3_S3", X, y, X_val, y_val)
get_X_y("/home/ncos/raid/EV-IMO/SET4/O1O3_PLAIN_WALL_P3", X, y, X_val, y_val)
#get_X_y("/home/ncos/raid/EV-IMO/SET4/O1O3_TOP", X, y, X_val, y_val)

print ("Input dataset size:", len(X), "data points")
print ("\t\t", len(X_val), "validation points")
print ()

vel_converter = Vel2Vec()

MEMORY = []
MEMORY_image = []
# Random basis vectors for binding; each rand() call reseeds the vector,
# so only the last call per vector matters — kept as written.
basis_vectors = []
for i in range(4):
    x = pyhdc.LBV()
    for k in range(i + 1):
        x.rand()
    basis_vectors.append(x)

# create memory: bind each image vector with its velocity vectors, store
# the bound entry and its unbinding with basis_vectors[0]
for i, v_img in enumerate(X):
    v_vel = vel_converter.vel2vecs(y[i])
    v_mem = bind([v_img] + v_vel, basis_vectors)
    MEMORY.append(v_mem)
    test = pyhdc.LBV()
    test.xor(v_mem)
    test.xor(basis_vectors[0])
    MEMORY_image.append(test)
# Load datasets; get_X_y, X, y, X_val, y_val come from earlier in this
# script (outside this view).
get_X_y("/home/ncos/raid/EV-IMO/SET4/O1O2O3_S3", X, y, X_val, y_val)
get_X_y("/home/ncos/raid/EV-IMO/SET4/O1O3_PLAIN_WALL_P3", X, y, X_val, y_val)
#get_X_y("/home/ncos/raid/EV-IMO/SET4/O1O3_TOP", X, y, X_val, y_val)

max_bb = 900
X = X[100:]  # drop the first 100 frames
#X = X[100:200 + max_bb]

print("Input dataset size:", len(X), "data points")
print("\t\t", len(X_val), "validation points")
print()

# Random basis vectors; each rand() reseeds the vector in place, so only
# the final call per vector matters — kept as written.
basis_vectors = []
for i in range(max_bb):
    x = pyhdc.LBV()
    for k in range(i + 1):
        x.rand()
    basis_vectors.append(x)

seq_len = 40
print("Creating a memory")
MEMORY, MEMORY_image = create_memory(X, seq_len, basis_vectors)

# Score the first image against the memory
h = memscore(X[0], MEMORY, MEMORY_image, seq_len, basis_vectors)
print(h)

# Early exit: the sweep below is disabled
sys.exit()

# NOTE(review): this loop body may continue past the visible chunk.
for seq_len in range(1, 10000):
    MEMORY, MEMORY_image = create_memory(X, seq_len, basis_vectors)