def generate_rectangle_region_old(x_range, y_range, X, Y, resolution=100):
    """Build a semantic pointer representing a rectangular region of space.

    Parameters
    ----------
    x_range, y_range : tuple
        ``(low, high)`` bounds of the rectangle along each axis.
    X, Y : spa.SemanticPointer
        Axis vectors; must be unitary (all FFT magnitudes 1), asserted below.
    resolution : int
        Samples per axis used by the summation fallback. Must be divisible
        sensibly by 5 for the normalization grid (``resolution // 5``).

    Returns
    -------
    spa.SemanticPointer
        A vector whose similarity map approximates an indicator of the
        rectangle.
    """
    fft_X = np.fft.fft(X.v)
    fft_Y = np.fft.fft(Y.v)
    phi = np.angle(fft_X)
    gamma = np.angle(fft_Y)
    # The closed-form branch below assumes unitary axis vectors.
    assert np.allclose(np.abs(fft_X), 1)
    assert np.allclose(np.abs(fft_Y), 1)
    # BUG FIX: the algebraic branch divides by BOTH phi and gamma, so a zero
    # phase in either one must route to the summation fallback (the original
    # only checked phi, producing inf/nan when gamma had a zero entry).
    if any(phi == 0) or any(gamma == 0):
        # Can't divide, just use summation over a grid of encoded points.
        region_analytic = np.zeros_like(X.v)
        for x in np.linspace(*x_range, resolution):
            for y in np.linspace(*y_range, resolution):
                region_analytic += encode_point(x, y, X, Y).v
        # BUG FIX: np.linspace requires an integer sample count; in Python 3
        # `resolution / 5` is a float and raises TypeError. Use floor division.
        norm_xs = np.linspace(*x_range, resolution // 5)
        norm_ys = np.linspace(*y_range, resolution // 5)
        # Normalize so the peak similarity over a coarse grid is 1.
        return spa.SemanticPointer(
            region_analytic / np.max(spatial_dot(region_analytic, norm_xs, norm_ys, X, Y))
        )
    else:
        # (FYI this is Euler's formula as we are applying it implicitly)
        # pi = phi * x1
        # assert np.allclose(fft_X ** x1, np.cos(pi) + 1j * np.sin(pi))
        INVPHI = spa.SemanticPointer(np.fft.ifft(1j / phi))
        INVGAMMA = spa.SemanticPointer(np.fft.ifft(1j / gamma))
        # Closed-form integral of the encoded points over the rectangle.
        region_algebraic = (((power(X, x_range[1]) - power(X, x_range[0])) * INVPHI) *
                            (((power(Y, y_range[1]) - power(Y, y_range[0])) * INVGAMMA)))
        return region_algebraic
def encode_memory(pred_obj_list, xs, ys, obj_vectors, axis_vec, n, m, size=120, lim=5):
    """Bind object identity vectors to encoded locations and sum into memories.

    Parameters
    ----------
    pred_obj_list : array-like
        Indices into ``obj_vectors`` for each object (shape assumed (n, m) —
        TODO confirm against caller).
    xs, ys : array-like
        Pixel coordinates of each object, flattened to length n*m.
    obj_vectors : np.ndarray
        Vocabulary of object semantic-pointer vectors.
    axis_vec : sequence
        ``(X, Y)`` axis semantic pointers for ``encode_point``.
    n, m : int
        Number of memories and objects per memory.
    size : int
        Image side length in pixels used to rescale coordinates (default 120).
    lim : int
        Half-width of the spatial range; coordinates map to [-lim, lim).

    Returns
    -------
    dict with keys 'obj_loc_memory', 'obj_memory', 'individual_obj_vectors'.
    """
    individual_obj_vectors = obj_vectors[pred_obj_list]
    # BUG FIX: use the `size` parameter instead of the hard-coded 120 so that
    # non-default image sizes rescale correctly (default behavior unchanged).
    scale = size / (lim * 2)
    # Encode each (x, y) pixel coordinate as a spatial semantic pointer.
    loc_vectors = np.array([
        encode_point(x / scale - lim, y / scale - lim, axis_vec[0], axis_vec[1])
        for x, y in zip(np.array(xs).ravel(), np.array(ys).ravel())
    ]).reshape(n, m)
    # Bind identity to location, then superpose per memory (axis 1).
    encoded_objs = individual_obj_vectors * loc_vectors
    obj_loc_memory = np.sum(encoded_objs, axis=1)
    obj_memory = np.sum(individual_obj_vectors, axis=1)
    memory_data = {
        'obj_loc_memory': obj_loc_memory,
        'obj_memory': obj_memory,
        'individual_obj_vectors': individual_obj_vectors,
    }
    return memory_data
def convert_pixels(img, X, Y, spa_range=(-5, 5)):
    """Convert a 2D image into a single spatial-semantic-pointer vector.

    Every pixel with positive intensity contributes ``encode_point(...)``
    at its mapped spatial coordinate, weighted by the pixel value.
    NOTE(review): both axes are offset by ``spa_range[0]`` — assumes the same
    lower bound is intended for x and y; confirm against callers.
    """
    lo = spa_range[0]
    extent = spa_range[1] - lo
    n_rows, n_cols = img.shape
    accumulated = np.zeros_like(X.v)
    # Row-major scan, identical order to a nested i/j loop.
    for row, col in np.ndindex(n_rows, n_cols):
        intensity = img[row, col]
        if intensity > 0:
            point = encode_point(
                row * extent / n_rows + lo,
                col * extent / n_cols + lo,
                X,
                Y,
            )
            accumulated += point.v * intensity
    return accumulated
def encode_memory_shape(pred_obj_list, xs, ys, obj_vectors, axis_vec, shape, n, m, size=120, lim=5):
    """Like ``encode_memory`` but additionally binds a shape vector to each object.

    Parameters
    ----------
    pred_obj_list : array-like
        Indices into ``obj_vectors`` for each object.
    xs, ys : array-like
        Pixel coordinates of each object, flattened to length n*m.
    obj_vectors : np.ndarray
        Vocabulary of object semantic-pointer vectors.
    axis_vec : sequence
        ``(X, Y)`` axis semantic pointers for ``encode_point``.
    shape : array-like
        Shape semantic pointer(s) bound into each encoded object
        (broadcast against the (n, m) object grid — TODO confirm shape).
    n, m : int
        Number of memories and objects per memory.
    size : int
        Image side length in pixels used to rescale coordinates (default 120).
    lim : int
        Half-width of the spatial range; coordinates map to [-lim, lim).

    Returns
    -------
    dict with keys 'obj_loc_memory', 'obj_memory', 'individual_obj_vectors'.
    """
    individual_obj_vectors = obj_vectors[pred_obj_list]
    # BUG FIX: use the `size` parameter instead of the hard-coded 120 so that
    # non-default image sizes rescale correctly (default behavior unchanged,
    # consistent with the same fix in encode_memory).
    scale = size / (lim * 2)
    loc_vectors = np.array([
        encode_point(x / scale - lim, y / scale - lim, axis_vec[0], axis_vec[1])
        for x, y in zip(np.array(xs).ravel(), np.array(ys).ravel())
    ]).reshape(n, m)
    # Bind identity * location * shape, then superpose per memory (axis 1).
    encoded_objs = individual_obj_vectors * loc_vectors * shape
    obj_loc_memory = np.sum(encoded_objs, axis=1)
    obj_memory = np.sum(individual_obj_vectors, axis=1)
    memory_data = {
        'obj_loc_memory': obj_loc_memory,
        'obj_memory': obj_memory,
        'individual_obj_vectors': individual_obj_vectors,
    }
    return memory_data


# NOTE(review): dead commented-out entry point below, kept verbatim for
# reference; consider deleting (it lives in version control) or restoring.
# def main():
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--n', type=int, default=5000)
#     parser.add_argument('--m', type=int, default=3)
#     parser.add_argument('--imdim', type=int, default=28)
#     parser.add_argument('--savefile', type=str, default='data512')
#     parser.add_argument('--imagefile', type=str, default='generated_images')
#     parser.add_argument('--vectorfile', type=str, default='image_and_memory')
#     parser.add_argument('--modelfile', type=str, default='mnist_net')
#     args = parser.parse_args()
#     n = args.n
#     m = args.m
#     im_dim = args.imdim
#     savefile = args.savefile
#     imagefile = args.imagefile
#     vectorfile = args.vectorfile
#     modelfile = args.modelfile
#     # Get a batch of n random images with m digits
#     model = keras.models.load_model(modelfile+'.h5')
#     objs = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"]
#     mnist_datafile = imagefile+'.p'
#     img_data = pickle.load(open(mnist_datafile,'rb'))
#     images = img_data['images']
#     xs = img_data['x']
#     ys = img_data['y']
#     obj_list = img_data['obj_list']
#     pred_obj_list = decode_image(images, xs, ys, im_dim, model)
#     spa_datafile = vectorfile+'.p'
#     spa_data = pickle.load(open(spa_datafile,'rb'))
#     axis_vec = spa_data['axis_vec']
#     obj_dict = spa_data['obj_dict']
#     objs = ["ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN", "EIGHT", "NINE"]
#     obj_vectors = np.stack([obj_dict[_] for _ in objs])
#     size = 120
#     lim = 5
#     memory_data = encode_memory(pred_obj_list,xs,ys,obj_vectors,axis_vec,n,m)
#     memory_data['obj_vectors'] = obj_vectors
#     memory_data['objs'] = objs
#     memory_data['axis_vec'] = axis_vec
#     memory_data['obj_dict'] = obj_dict
#     memory_data['pred_obj_list'] = pred_obj_list
#     pickle.dump(memory_data, open(savefile+".p", "wb"))
#
# if __name__ == '__main__':
#     main()
# NOTE(review): truncated script fragment — the chunk boundary cut the body of
# the final `for i, animal in enumerate(vocab_labels):` header, and the original
# multi-line indentation was lost in extraction. Preserved verbatim; recover
# the full source before editing. It appears to build per-seed axis vectors,
# a vocabulary of animal pointers, and a superposed location memory — confirm
# against the original file.
for seed in range(n_seeds): rstate = np.random.RandomState(seed=seed) x_axis_sp = make_good_unitary(dim, rng=rstate) y_axis_sp = make_good_unitary(dim, rng=rstate) heatmap_vectors = get_heatmap_vectors(xs, ys, x_axis_sp, y_axis_sp) vocab_sps = {} for i, animal in enumerate(vocab_labels): vocab_sps[animal] = spa.SemanticPointer(dim) vocab_vectors[i, :] = vocab_sps[animal].v mem = spa.SemanticPointer(data=np.zeros(dim)) fox_pos1 = encode_point(1.2, 1.3, x_axis_sp, y_axis_sp) fox_pos2 = encode_point(-3.4, -1.1, x_axis_sp, y_axis_sp) dog_pos = encode_point(1.7, -1.1, x_axis_sp, y_axis_sp) badger_pos = encode_point(4.1, 3.2, x_axis_sp, y_axis_sp) bear_pos = encode_point(2.1, 2.4, x_axis_sp, y_axis_sp) none_pos = encode_point(0, 0, x_axis_sp, y_axis_sp) mem += vocab_sps['Fox'] * fox_pos1 mem += vocab_sps['Fox'] * fox_pos2 mem += vocab_sps['Dog'] * dog_pos mem += vocab_sps['Badger'] * badger_pos mem += vocab_sps['Bear'] * bear_pos mem.normalize() for i, animal in enumerate(vocab_labels):
# NOTE(review): truncated plotting fragment — begins mid-way through a call
# whose opening (and the enclosing definition) lies outside this chunk, and
# ends mid-section; original indentation was lost in extraction. Preserved
# verbatim; recover the full source before editing. Appears to render a
# heatmap of a single encoded point and save it to figures/single_item.pdf —
# confirm against the original file.
vmin=vmin, vmax=vmax, cmap=cmap) plt.colorbar() if name: plt.suptitle(name) ############### # Single Item # ############### if "Single Item" in plot_types: fig, ax = plt.subplots(tight_layout=True, figsize=(4, 4)) coord_sp = encode_point(3, -2, x_axis_sp, y_axis_sp) heatmap( coord_sp.v, heatmap_vectors, ax, name="Single Object", vmin=vmin, vmax=vmax, cmap=cmap, ) fig.savefig('figures/single_item.pdf', dpi=600, bbox_inches='tight') ##################### # Two Items Decoded # #####################
# NOTE(review): truncated fragment — begins mid-way through a constructor call
# (presumably a MemoryDataset, matching main() below — confirm) whose opening
# lies outside this chunk; original indentation was lost in extraction.
# Preserved verbatim; recover the full source before editing. Appears to draw
# samples, unbind item/location from each memory, and record similarities into
# pre-allocated arrays indexed by [li, n, s].
normalize_memory=args.normalize_memory, x_axis_sp=x_axis_sp, y_axis_sp=y_axis_sp, ) data_gen = dataset.sample_generator(item_set=vocab_vectors_copy) for s in range(args.n_samples): # Acquire the next sample mem_v, item_v, coord_v = data_gen.__next__() mem = spa.SemanticPointer(data=mem_v) # Pick one item that is in the memory (in this case the first one) item_loc = encode_point(coord_v[0], coord_v[1], x_axis_sp=x_axis_sp, y_axis_sp=y_axis_sp) item_sp = spa.SemanticPointer(data=item_v) items_used[li, n, s, :] = item_v loc_sp_used[li, n, s, :] = item_loc.v coord_used[li, n, s, :] = coord_v extract_item = (mem * ~item_loc).v extract_loc = (mem * ~item_sp).v extract_items[li, n, s, :] = extract_item extract_locs[li, n, s, :] = extract_loc lq_similarity[li, n, s] = np.dot(extract_item, item_v) iq_similarity[li, n, s] = np.dot(extract_loc, item_loc.v)
def main():
    """Benchmark spatial-semantic-pointer query capabilities non-neurally.

    For each memory size in [n_items_min, n_items_max], repeatedly builds
    random memories and measures accuracy on: single-object location query,
    missing-object rejection, location->item query, duplicate-object query,
    region query, and sliding (whole-group / single-object, with and without
    normalization rescaling). Results are saved as an .npz in ``args.folder``.
    """
    # NOTE(review): the positional argument to ArgumentParser sets `prog`,
    # not `description` — likely `description=` was intended; confirm.
    parser = argparse.ArgumentParser('Measuring the performance of various capabilities of spatial semantic pointers')
    parser.add_argument('--n-samples', type=int, default=100, help='Number of samples to evaluate per item number')
    parser.add_argument('--dim', type=int, default=512, help='Dimensionality of the semantic pointers')
    parser.add_argument('--neurons-per-dim', type=int, default=15)
    parser.add_argument('--limit', type=int, default=5, help='The absolute min and max of the space')
    parser.add_argument('--res', type=int, default=128, help='Resolution for the linspace')
    parser.add_argument('--n-items-min', type=int, default=2, help='Lowest number of items in a memory')
    parser.add_argument('--n-items-max', type=int, default=24, help='Highest number of items in a memory')
    # One threshold is best for region queries, the other for the other queries, TODO: use them in the appropriate places
    parser.add_argument('--similarity-threshold', type=float, default=0.1, help='Similarity must be above this value to count')
    # parser.add_argument('--similarity-threshold', type=float, default=0.25, help='Similarity must be above this value to count')
    parser.add_argument('--seed', type=int, default=13)
    parser.add_argument('--folder', default='output/non_neural_results', help='folder to save results')
    args = parser.parse_args()

    # Output filename encodes the experiment configuration.
    fname = 'seed{}_dim{}_min{}_max{}.npz'.format(args.seed, args.dim, args.n_items_min, args.n_items_max)

    # Range of item sizes to try
    item_range = list(range(args.n_items_min, args.n_items_max + 1))
    n_item_range = len(item_range)

    # Spatial sampling grid covering [-limit, limit] on both axes.
    xs = np.linspace(-args.limit, args.limit, args.res)
    ys = np.linspace(-args.limit, args.limit, args.res)

    rstate = np.random.RandomState(seed=args.seed)
    x_axis_sp = make_good_unitary(args.dim, rng=rstate)
    y_axis_sp = make_good_unitary(args.dim, rng=rstate)
    heatmap_vectors = get_heatmap_vectors(xs, ys, x_axis_sp, y_axis_sp)
    # These are for dealing with shifted memories, that could potentially go outside the normal range
    larger_heatmap_vectors = get_heatmap_vectors(xs*2, ys*2, x_axis_sp, y_axis_sp)

    if not os.path.exists(args.folder):
        os.makedirs(args.folder)

    # One (n_item_range, n_samples) accuracy matrix per query type.
    results = {
        'single_object': np.zeros((n_item_range, args.n_samples)),
        'missing_object': np.zeros((n_item_range, args.n_samples)),
        'duplicate_object': np.zeros((n_item_range, args.n_samples)),
        'location': np.zeros((n_item_range, args.n_samples)),
        'sliding_group': np.zeros((n_item_range, args.n_samples)),
        'sliding_object': np.zeros((n_item_range, args.n_samples)),
        'sliding_object_moved_only': np.zeros((n_item_range, args.n_samples)),
        'sliding_object_scaled': np.zeros((n_item_range, args.n_samples)),
        'sliding_object_scaled_moved_only': np.zeros((n_item_range, args.n_samples)),
        'region': np.zeros((n_item_range, args.n_samples)),
    }

    for n, n_items in enumerate(item_range):
        print("Running experiments for n_items={}".format(n_items))
        vocab = spa.Vocabulary(args.dim)
        # n_vocab_vectors = args.n_items_max * 2
        n_vocab_vectors = n_items * 2
        vocab_vectors = np.zeros((n_vocab_vectors, args.dim))
        # print("Generating {0} vocab items".format(n_vocab_vectors))
        for i in range(n_vocab_vectors):
            p = vocab.create_pointer()
            vocab_vectors[i, :] = p.v
        # print("Vocab generation complete")

        # A copy that will get shuffled around in MemoryDataset
        vocab_vectors_copy = vocab_vectors.copy()

        dataset = MemoryDataset(
            dim=args.dim,
            n_items=0,  # unused,
            allow_duplicate_items=False,
            limits=(-args.limit, args.limit, -args.limit, args.limit),
            normalize_memory=True,
            x_axis_sp=x_axis_sp,
            y_axis_sp=y_axis_sp,
        )

        # data_gen = dataset.sample_generator(item_set=vocab_vectors_copy)
        data_gen_var_item = dataset.variable_item_sample_generator(
            item_set=vocab_vectors_copy,
            n_items_min=n_items,
            n_items_max=n_items,
        )
        data_gen_duplicate = dataset.duplicates_sample_generator(
            item_set=vocab_vectors_copy,
            n_items_min=max(2, n_items),
            n_items_max=n_items,
        )
        data_gen_multi = dataset.multi_return_sample_generator(
            item_set=vocab_vectors_copy,
            n_items=n_items,
            allow_duplicate_items=False,
        )
        # Generates circular regions
        data_gen_region = dataset.region_sample_generator(
            vocab_vectors=vocab_vectors,
            xs=xs,
            ys=ys,
            n_items_min=n_items,
            n_items_max=n_items,
            rad_min=1,
            rad_max=3
        )

        # Query Single Object and Query Location
        for s in range(args.n_samples):
            # Acquire the next sample
            # NOTE(review): this rebinds the outer loop variable `n_items`;
            # the generator presumably yields the same value, but confirm.
            mem_v, item_v, coord_v, n_items = data_gen_var_item.__next__()
            item_loc = encode_point(coord_v[0], coord_v[1], x_axis_sp=x_axis_sp, y_axis_sp=y_axis_sp)
            mem_sp = spa.SemanticPointer(data=mem_v)
            # Unbind item -> location estimate, and location -> item estimate.
            loc_result = mem_sp * ~ spa.SemanticPointer(data=item_v)
            item_result = mem_sp * ~ item_loc
            # using a random semantic pointer here
            loc_missing_result = spa.SemanticPointer(data=mem_v) * ~ spa.SemanticPointer(args.dim)
            # TODO: find the grid coordinate of the top location, count as correct it matches the real coordinate
            results['single_object'][n, s] = loc_match(
                sp=loc_result,
                heatmap_vectors=heatmap_vectors,
                coord=coord_v,
                xs=xs,
                ys=ys,
                distance_threshold=0.5,
                sim_threshold=args.similarity_threshold,
            )
            results['location'][n, s] = item_match(
                sp=item_result,
                vocab_vectors=vocab_vectors,
                item=item_v,
                sim_threshold=args.similarity_threshold,
            )
            # Querying with a random (absent) pointer should NOT match;
            # score is inverted so 1 means correct rejection.
            results['missing_object'][n, s] = 1 - loc_match(
                sp=loc_missing_result,
                heatmap_vectors=heatmap_vectors,
                coord=coord_v,
                xs=xs,
                ys=ys,
                distance_threshold=0.5,
                sim_threshold=args.similarity_threshold,
            )

        # Query Duplicate Objects
        for s in range(args.n_samples):
            # Acquire the next sample for duplicates
            mem_v, item_v, coord1_v, coord2_v = data_gen_duplicate.__next__()
            loc_results = spa.SemanticPointer(data=mem_v) * ~ spa.SemanticPointer(data=item_v)
            # TODO: find the grid coordinates of the top two locations, count as correct if they match the real coordinates
            results['duplicate_object'][n, s] = loc_match_duplicate(
                loc_results,
                heatmap_vectors,
                coord1=coord1_v,
                coord2=coord2_v,
                xs=xs,
                ys=ys,
                sim_threshold=args.similarity_threshold,
            )

        # Query Region
        # NOTE: threshold will depend on region size
        # TODO: redo that old region experiment with better region generation
        for s in range(args.n_samples):
            mem_v, items, coords, region_v, vocab_indices = data_gen_region.__next__()
            mem_sp = spa.SemanticPointer(data=mem_v)
            region_sp = spa.SemanticPointer(data=region_v)
            # Unbinding the region pointer yields a superposition of the
            # items located inside the region.
            region_results = mem_sp * ~region_sp
            results['region'][n, s] = region_item_match(
                region_results,
                vocab_vectors,
                vocab_indices,
                sim_threshold=args.similarity_threshold
            )

        # Sliding Whole Group and Sliding Single Object
        # accuracy will be the number of matches in the end
        for s in range(args.n_samples):
            mem_v, item_vs, coord_vs = data_gen_multi.__next__()
            mem_sp = spa.SemanticPointer(data=mem_v)
            # Choose random amount to move by
            dx = np.random.uniform(-args.limit / 2., args.limit / 2.)
            dy = np.random.uniform(-args.limit / 2., args.limit / 2.)
            slide_vec = np.array([dx, dy])
            # slide_vec = np.array([dy, dx])
            d_coord = encode_point(dx, dy, x_axis_sp, y_axis_sp)
            # Binding the whole memory with a displacement shifts every item.
            slide_mem_sp = mem_sp * d_coord
            first_item = spa.SemanticPointer(data=item_vs[0, :])
            first_coord = encode_point(coord_vs[0, 0], coord_vs[0, 1], x_axis_sp, y_axis_sp)
            # Move only the first item: add it at the new location, subtract
            # it at the old one.
            single_slide_mem_sp = mem_sp + first_item*first_coord*d_coord - first_item*first_coord
            single_slide_mem_sp.normalize()
            # scaling to account for normalization
            scaling = 1 / np.sqrt(n_items)
            single_slide_scaled_mem_sp = mem_sp + scaling*first_item*first_coord*d_coord - scaling*first_item*first_coord
            single_slide_scaled_mem_sp.normalize()

            res_group = 0
            res_single = 0
            res_single_move_only = 0
            res_single_scaled = 0
            res_single_scaled_move_only = 0
            for i in range(n_items):
                loc_result = slide_mem_sp * ~ spa.SemanticPointer(data=item_vs[i, :])
                # Shifted items may leave the base range, so match against the
                # double-size heatmap grid.
                res_group += loc_match(
                    sp=loc_result,
                    heatmap_vectors=larger_heatmap_vectors,
                    coord=coord_vs[i, :] + slide_vec,
                    xs=xs*2,
                    ys=ys*2,
                    distance_threshold=0.5,
                    sim_threshold=args.similarity_threshold,
                )
                single_loc_result = single_slide_mem_sp * ~ spa.SemanticPointer(data=item_vs[i, :])
                single_loc_scaled_result = single_slide_scaled_mem_sp * ~ spa.SemanticPointer(data=item_vs[i, :])
                # Only the first item has moved for the single movement case
                if i == 0:
                    res_single_move_only = loc_match(
                        sp=single_loc_result,
                        heatmap_vectors=larger_heatmap_vectors,
                        coord=coord_vs[i, :] + slide_vec,
                        xs=xs*2,
                        ys=ys*2,
                        distance_threshold=0.5,
                        sim_threshold=args.similarity_threshold,
                    )
                    res_single += res_single_move_only
                    res_single_scaled_move_only = loc_match(
                        sp=single_loc_scaled_result,
                        heatmap_vectors=larger_heatmap_vectors,
                        coord=coord_vs[i, :] + slide_vec,
                        xs=xs*2,
                        ys=ys*2,
                        distance_threshold=0.5,
                        sim_threshold=args.similarity_threshold,
                    )
                    res_single_scaled += res_single_scaled_move_only
                else:
                    res_single += loc_match(
                        sp=single_loc_result,
                        heatmap_vectors=larger_heatmap_vectors,
                        coord=coord_vs[i, :],
                        xs=xs*2,
                        ys=ys*2,
                        distance_threshold=0.5,
                        sim_threshold=args.similarity_threshold,
                    )
                    res_single_scaled += loc_match(
                        sp=single_loc_scaled_result,
                        heatmap_vectors=larger_heatmap_vectors,
                        coord=coord_vs[i, :],
                        xs=xs*2,
                        ys=ys*2,
                        distance_threshold=0.5,
                        sim_threshold=args.similarity_threshold,
                    )
            # Average match counts over the items in the memory.
            res_group /= n_items
            res_single /= n_items
            res_single_scaled /= n_items
            results['sliding_group'][n, s] = res_group
            results['sliding_object'][n, s] = res_single
            results['sliding_object_moved_only'][n, s] = res_single_move_only
            results['sliding_object_scaled'][n, s] = res_single_scaled
            results['sliding_object_scaled_moved_only'][n, s] = res_single_scaled_move_only

    np.savez(
        os.path.join(args.folder, fname),
        item_range=np.array(item_range),
        **results
    )