Example 1
def predict_double_query(obj_loc_memory, obj_memory, query_obj, dirs, query_obj2, dirs2, obj_dic, region_selector, loc_table):
    try: #in case of single query
        n = len(query_obj)
    except Exception:
        n = 1
        query_obj = [query_obj]
        query_obj2 = [query_obj2]
        obj_loc_memory = [obj_loc_memory]
        obj_memory = [obj_memory]
        dirs = [dirs] #might not need this one
        dirs2 = [dirs2] #might not need this one

    extracted_query_loc = [obj_loc_memory[i] * ~query_obj[i] for i in range(n)]
    extracted_query_loc = [spa.SemanticPointer(lookup_space_table(V, loc_table)) for V in extracted_query_loc]

    extracted_query_loc2 = [obj_loc_memory[i] * ~query_obj2[i] for i in range(n)]
    extracted_query_loc2 = [spa.SemanticPointer(lookup_space_table(V, loc_table)) for V in extracted_query_loc2]

    dir_regions = region_selector[dirs]
    dir_regions2 = region_selector[dirs2]
    query_region = [extracted_query_loc[i] * dir_regions[i] + extracted_query_loc2[i] * dir_regions2[i]  for i in range(n)]
    extract = np.array([obj_loc_memory[i] * ~ query_region[i] for i in range(n)])

    dots = np.array([obj_dic.dot(_) for _ in extract])

    # object_memory_raw = np.sum(obj_memory, axis=1)
    object_memory_raw = obj_memory
    object_memory = object_memory_raw - query_obj #eliminate one instance of queried object
    extract_objs = np.array([obj_dic.dot(_) > 0.8 for _ in object_memory])

    preds = np.where(extract_objs, dots, -1) #set non-present objects to -1 so they cannot win the argmax
    obj_preds = np.argmax(preds, axis = 1)
    return obj_preds
Example 2
def orthogonal_hex_dir(
        phis=(np.pi / 2., np.pi / 10.),
        angles=(0, np.pi / 3.), even_dim=False):
    n_scales = len(phis)
    dim = 6 * n_scales + 1
    if even_dim:
        dim += 1

    xf = np.zeros((dim, ), dtype='complex128')
    xf[0] = 1

    yf = np.zeros((dim, ), dtype='complex128')
    yf[0] = 1

    if even_dim:
        xf[dim // 2] = 1
        yf[dim // 2] = 1

    for i in range(n_scales):
        phi_xs, phi_ys = get_sub_phi(phis[i], angles[i])
        xf[1 + i * 3:1 + (i + 1) * 3] = phi_xs
        yf[1 + i * 3:1 + (i + 1) * 3] = phi_ys

    xf[-1:dim // 2:-1] = np.conj(xf[1:(dim + 1) // 2])
    yf[-1:dim // 2:-1] = np.conj(yf[1:(dim + 1) // 2])

    X = np.fft.ifft(xf).real
    Y = np.fft.ifft(yf).real

    return spa.SemanticPointer(data=X), spa.SemanticPointer(data=Y)
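A minimal usage sketch (not from the original source), assuming numpy is imported as np, `spa` provides a SemanticPointer with a `.v` attribute, and the `power` helper defined at the end of this collection: the two returned axes encode a 2-D point by fractional binding.

# Usage sketch: encode a 2-D location with the hexagonally arranged axes.
X, Y = orthogonal_hex_dir(phis=(np.pi / 2., np.pi / 10.), angles=(0, np.pi / 3.))
p = power(X, 1.3) * power(Y, -0.7)                       # SSP for the location (1.3, -0.7)
print(np.dot(p.v, (power(X, 1.3) * power(Y, -0.7)).v))   # ~1.0: identical location
print(np.dot(p.v, (power(X, 3.0) * power(Y, 2.0)).v))    # lower: a different location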
Example 3
def generate_rectangle_region_old(x_range, y_range, X, Y, resolution = 100):
    fft_X = np.fft.fft(X.v)
    fft_Y = np.fft.fft(Y.v)

    phi = np.angle(fft_X)
    gamma = np.angle(fft_Y)
    assert np.allclose(np.abs(fft_X), 1)
    assert np.allclose(np.abs(fft_Y), 1)
    if any(phi == 0):
        # can't divide, just use summation
        region_analytic = np.zeros_like(X.v)
        for x in np.linspace(*x_range, resolution):
            for y in np.linspace(*y_range, resolution):
                region_analytic += encode_point(x, y, X, Y).v
        return spa.SemanticPointer(
            region_analytic / np.max(spatial_dot(
                region_analytic,
                np.linspace(*x_range, resolution // 5),
                np.linspace(*y_range, resolution // 5),
                X, Y)))
    else:
        # (FYI this is Euler's formula as we are applying it implicitly)
        # pi = phi * x1
        # assert np.allclose(fft_X ** x1, np.cos(pi) + 1j * np.sin(pi))
        INVPHI = spa.SemanticPointer(np.fft.ifft(1j / phi))
        INVGAMMA = spa.SemanticPointer(np.fft.ifft(1j / gamma))

        region_algebraic = (((power(X, x_range[1]) - power(X, x_range[0])) * INVPHI) *
                            (((power(Y, y_range[1]) - power(Y, y_range[0])) * INVGAMMA)))
        return region_algebraic
Example 4
def generate_item_memory(dim,
                         n_items,
                         limits,
                         x_axis_vec,
                         y_axis_vec,
                         normalize_memory=True):
    """
    Create a semantic pointer that contains a number of items bound with respective coordinates
    Returns the memory, along with a list of the items and coordinates used
    """

    # Start with an empty memory
    memory_sp = spa.SemanticPointer(data=np.zeros((dim)))
    coord_list = []
    item_list = []

    for n in range(n_items):
        # Generate random point
        x = np.random.uniform(low=limits[0], high=limits[1])
        y = np.random.uniform(low=limits[2], high=limits[3])
        pos = encode_point(x, y, x_axis=x_axis_vec, y_axis=y_axis_vec)

        # Generate random item
        item = spa.SemanticPointer(dim)

        # Add the item at the point to memory
        memory_sp += (pos * item)

        coord_list.append((x, y))
        item_list.append(item)

    if normalize_memory:
        memory_sp.normalize()

    return memory_sp, coord_list, item_list
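A usage sketch (an illustration, not part of the original code), assuming make_good_unitary from later in this collection, the encode_point helper used throughout these examples, and a legacy nengo.spa-style SemanticPointer API: an item's location can be read back out of the memory by unbinding with that item.

# Usage sketch: store five items and recover one location by unbinding.
dim = 512
X = make_good_unitary(dim)
Y = make_good_unitary(dim)
mem, coords, items = generate_item_memory(dim, n_items=5, limits=(-4, 4, -4, 4),
                                          x_axis_vec=X, y_axis_vec=Y)
loc_estimate = mem * ~items[0]                          # noisy SSP for item 0's location
true_ssp = encode_point(coords[0][0], coords[0][1], X, Y)
print(np.dot(loc_estimate.v, true_ssp.v))               # well above chance, though noisy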
Example 5
def get_heatmap_vectors(xs, ys, x_axis_sp, y_axis_sp):
    """
    Precompute spatial semantic pointers for every location in the linspace
    Used to quickly compute heat maps by a simple vectorized dot product (matrix multiplication)
    """
    if x_axis_sp.__class__.__name__ == 'SemanticPointer':
        dim = len(x_axis_sp.v)
    else:
        dim = len(x_axis_sp)
        x_axis_sp = spa.SemanticPointer(data=x_axis_sp)
        y_axis_sp = spa.SemanticPointer(data=y_axis_sp)

    vectors = np.zeros((len(xs), len(ys), dim))

    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            p = encode_point(
                x=x,
                y=y,
                x_axis_sp=x_axis_sp,
                y_axis_sp=y_axis_sp,
            )
            vectors[i, j, :] = p.v

    return vectors
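A usage sketch (assumptions: numpy as np, make_good_unitary from this collection, and the encode_point helper used throughout): decode a location from an SSP by a vectorized dot product with the heatmap vectors.

# Usage sketch: recover the encoded coordinates with an argmax over the similarity map.
dim = 512
X = make_good_unitary(dim)
Y = make_good_unitary(dim)
xs = np.linspace(-5, 5, 64)
ys = np.linspace(-5, 5, 64)
hmv = get_heatmap_vectors(xs, ys, X, Y)
ssp = encode_point(x=3.2, y=-1.5, x_axis_sp=X, y_axis_sp=Y)
sim = np.tensordot(hmv, ssp.v, axes=([2], [0]))   # (len(xs), len(ys)) similarity map
i, j = np.unravel_index(np.argmax(sim), sim.shape)
print(xs[i], ys[j])                               # close to (3.2, -1.5)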
def get_fixed_dim_variable_sub_toriod_axes(
    dim=256,
    rng=np.random,
    eps=0.001,
):

    # total number of components that can be used
    n_circles = (dim - 1) // 2

    # Generate potential sub-manifold dimensions.
    # Commonly selecting 3, but allowing higher dimensions
    # toroid_dims = np.ceil(1 + 2 * rng.poisson(1, size=n_circles))
    toroid_dims = np.floor(3 + rng.poisson(1, size=n_circles))

    # trim any excess, make the last dimension fit the remaining space
    cumulative = 0
    for i in range(n_circles):
        if cumulative + toroid_dims[i] >= n_circles:
            toroid_dims[i] = n_circles - cumulative
            toroid_dims = toroid_dims[:i + 1].copy()
            n_toroids = i + 1
            break
        else:
            cumulative += toroid_dims[i]

    # Randomly select the angle for each toroid
    angles = rng.uniform(0, 2 * np.pi, size=(n_toroids, ))

    scales = rng.uniform(-np.pi + eps, np.pi - eps, size=n_toroids)

    xf = np.ones((dim, ), dtype='complex64')
    yf = np.ones((dim, ), dtype='complex64')

    # each toroid can have a different dimensionality, so track a running offset
    offset = 1
    for n in range(n_toroids):
        n_proj = int(toroid_dims[n])
        phis_x, phis_y = get_proj_phi(n_proj=n_proj,
                                      phi=scales[n],
                                      angle=angles[n])
        xf[offset:offset + n_proj] = phis_x
        yf[offset:offset + n_proj] = phis_y
        offset += n_proj

    # set the appropriate conjugates
    xf[-1:dim // 2:-1] = np.conj(xf[1:(dim + 1) // 2])
    yf[-1:dim // 2:-1] = np.conj(yf[1:(dim + 1) // 2])
    if dim % 2 == 0:
        xf[dim // 2] = 1
        yf[dim // 2] = 1

    assert np.allclose(np.abs(xf), 1)
    assert np.allclose(np.abs(yf), 1)
    x = np.fft.ifft(xf).real
    y = np.fft.ifft(yf).real
    assert np.allclose(np.fft.fft(x), xf)
    assert np.allclose(np.fft.fft(y), yf)
    assert np.allclose(np.linalg.norm(x), 1)
    assert np.allclose(np.linalg.norm(y), 1)

    return spa.SemanticPointer(x), spa.SemanticPointer(y)
def get_fixed_dim_sub_toriod_axes(
    dim=256,
    n_proj=3,
    scale_ratio=(1 + 5**0.5) / 2,
    scale_start_index=0,
    rng=np.random,
    eps=0.001,
):

    # number of toroids possible given the projection dimension
    # if this does not evenly divide dim, extra dimensions will be set to zero (1 in frequency domain)
    n_toroids = ((dim - 1) // 2) // n_proj

    # Randomly select the angle for each toroid
    angles = rng.uniform(0, 2 * np.pi, size=(n_toroids, ))

    # if the scale ratio is set to 0, use a random distribution instead
    if scale_ratio == 0:
        scales = rng.uniform(-np.pi + eps, np.pi - eps, size=n_toroids)
    else:
        # create scales starting from the largest and moving down by the given ratio, with a start index of 0
        # the first scale will always be pi (minus epsilon to keep the direction unambiguous)
        # this is effectively the finest resolution that can be detected (though the scale into this representation is arbitrary)
        scales = np.array([
            (np.pi - eps) / (scale_ratio**i)
            for i in range(scale_start_index, n_toroids + scale_start_index)
        ])

    xf = np.ones((dim, ), dtype='complex64')
    yf = np.ones((dim, ), dtype='complex64')

    for n in range(n_toroids):
        phis_x, phis_y = get_proj_phi(n_proj=n_proj,
                                      phi=scales[n],
                                      angle=angles[n])
        xf[1 + n * n_proj:1 + (n + 1) * n_proj] = phis_x
        yf[1 + n * n_proj:1 + (n + 1) * n_proj] = phis_y

    # set the appropriate conjugates
    xf[-1:dim // 2:-1] = np.conj(xf[1:(dim + 1) // 2])
    yf[-1:dim // 2:-1] = np.conj(yf[1:(dim + 1) // 2])
    if dim % 2 == 0:
        xf[dim // 2] = 1
        yf[dim // 2] = 1

    assert np.allclose(np.abs(xf), 1)
    assert np.allclose(np.abs(yf), 1)
    x = np.fft.ifft(xf).real
    y = np.fft.ifft(yf).real
    assert np.allclose(np.fft.fft(x), xf)
    assert np.allclose(np.fft.fft(y), yf)
    assert np.allclose(np.linalg.norm(x), 1)
    assert np.allclose(np.linalg.norm(y), 1)

    return spa.SemanticPointer(x), spa.SemanticPointer(y)
Example 8
 def sample(self):
     unif_dist = UniformHypersphere()
     pt = unif_dist.sample(1, self.nbasis) * (
         self.bounds[:, 1] - self.bounds[:, 0]) + self.bounds[:, 0]
     sample = spa.SemanticPointer(data=np.fft.ifft(
         np.fft.fft(self.basis[0], axis=0)**pt[0, 0], axis=0).real)
     for i in np.arange(1, self.nbasis):
         nextS = spa.SemanticPointer(data=np.fft.ifft(
             np.fft.fft(self.basis[i], axis=0)**pt[0, i], axis=0).real)
         sample = sample * nextS
     return sample.v
Example 9
    def __call__(self, t, x):

        output = spa.SemanticPointer(data=np.zeros(self.dim))
        for i in range(self.n_items):
            x_pos = x[i * 3]
            y_pos = x[i * 3 + 1]
            identity = np.clip(int(x[i * 3 + 2]), 0, self.n_vocab - 1)

            output += encode_point(x_pos, y_pos, self.x_axis_sp,
                                   self.y_axis_sp) * spa.SemanticPointer(
                                       data=self.vocab_vectors[identity])

        return output.normalized().v
Example 10
def plot_heatmap(X, Y, xs, ys, ax):
    sim = np.zeros((len(xs), len(ys)))

    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            sim[i, j] = encode_point(x, y, spa.SemanticPointer(data=X), spa.SemanticPointer(data=Y)).v[0]

    im = ax.imshow(sim, vmin=0, vmax=1)
    # ax.set_axis_off()
    ax.set_xticks([])
    ax.set_yticks([])

    return im
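A usage sketch (assumes matplotlib and make_good_unitary from this collection); note that plot_heatmap expects raw vectors rather than SemanticPointers, so `.v` is passed in.

# Usage sketch: plot the similarity of every grid location to the origin.
import matplotlib.pyplot as plt

X = make_good_unitary(256)
Y = make_good_unitary(256)
fig, ax = plt.subplots()
im = plot_heatmap(X.v, Y.v, np.linspace(-5, 5, 64), np.linspace(-5, 5, 64), ax)
fig.colorbar(im, ax=ax)
plt.show()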
Example 11
 def samples(self, n):
     unif_dist = UniformHypersphere()
     pt = unif_dist.sample(n, self.nbasis) * (
         self.bounds[:, 1] - self.bounds[:, 0]) + self.bounds[:, 0]
     samples = np.zeros((n, self.dim))
     for j in np.arange(n):
         sample = spa.SemanticPointer(data=np.fft.ifft(
             np.fft.fft(self.basis[0], axis=0)**pt[j, 0], axis=0).real)
         for i in np.arange(1, self.nbasis):
             nextS = spa.SemanticPointer(data=np.fft.ifft(
                 np.fft.fft(self.basis[i], axis=0)**pt[j, i], axis=0).real)
             sample = sample * nextS
         samples[j, :] = sample.v
     return samples
def random_unitary(n_samples=1000, dim=3, version=1, eps=0.001):
    points = np.zeros((n_samples, dim))
    good = np.zeros((n_samples, ))

    for i in range(n_samples):
        if version == 1:
            sp = nengo_spa.SemanticPointer(data=np.random.randn(dim))
            sp = sp.normalized()
            sp = sp.unitary()
        elif version == 0:
            sp = spa.SemanticPointer(dim)
            sp.make_unitary()
        elif version == 2:
            sp = make_good_unitary(dim=dim)
        else:
            raise NotImplementedError

        points[i, :] = sp.v
        pf = np.fft.fft(points[i, :])
        if dim % 2 == 0:
            if np.abs(pf[0] - 1) < eps and np.abs(pf[dim // 2] - 1) < eps:
                good[i] = 1
        else:
            if np.abs(pf[0] - 1) < eps:
                good[i] = 1
    return points, good
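A usage sketch (assumes make_good_unitary from this collection): estimate how often a construction passes the "good unitary" check.

# Usage sketch: make_good_unitary (version=2) should pass the check for essentially every sample.
points, good = random_unitary(n_samples=200, dim=5, version=2)
print(good.mean())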
Example 13
def predict_single_query(obj_loc_memory, obj_memory, query_obj, dirs, obj_dic, region_selector, loc_table):
    try: #in case of single query
        n = len(query_obj)
    except Exception:
        n = 1
        query_obj = [query_obj]
        obj_loc_memory = [obj_loc_memory]
        obj_memory = [obj_memory]
        dirs = [dirs] #might not need this one

    extracted_query_loc = [obj_loc_memory[i] * ~query_obj[i] for i in range(n)]
    extracted_query_loc = [spa.SemanticPointer(lookup_space_table(V, loc_table)) for V in extracted_query_loc]

    dir_regions = region_selector[dirs]
    query_region = [extracted_query_loc[i] * dir_regions[i] for i in range(n)]
    extract = np.array([obj_loc_memory[i] * ~ query_region[i] for i in range(n)])

    dots = np.array([obj_dic.dot(_) for _ in extract])

    # object_memory_raw = np.sum(obj_memory, axis=1)
    object_memory_raw = obj_memory
    object_memory = object_memory_raw - query_obj #eliminate one instance of queried object
    extract_objs = np.array([obj_dic.dot(_) > 0.8 for _ in object_memory])

    preds = np.where(extract_objs, dots, -1) #set non-present objects to -1
    obj_preds = np.argmax(preds, axis = 1)
    return obj_preds #, preds #return similarities too 
def generate_region_vector(desired,
                           xs,
                           ys,
                           x_axis_sp,
                           y_axis_sp,
                           normalize=True):
    """
    :param desired: occupancy grid of what points should be in the region and which ones should not be
    :param xs: linspace in x
    :param ys: linspace in y
    :param x_axis_sp: x axis semantic pointer
    :param y_axis_sp: y axis semantic pointer
    :return: a normalized semantic pointer designed to be highly similar to the desired region
    """

    vector = np.zeros_like(x_axis_sp.v)
    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            if desired[i, j] == 1:
                vector += encode_point(x, y, x_axis_sp, y_axis_sp).v

    sp = spa.SemanticPointer(data=vector)

    if normalize:
        try:
            sp = sp.normalized()  # nengo_spa returns a new normalized pointer
        except AttributeError:
            sp.normalize()  # legacy nengo.spa normalizes in place

    return sp
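A usage sketch (assumes numpy as np, make_good_unitary from this collection, and the encode_point helper used throughout): build a disc-shaped region from an occupancy grid and check that points inside it score higher than points outside.

# Usage sketch: a disc of radius 2 around the origin as a region vector.
dim = 512
X = make_good_unitary(dim)
Y = make_good_unitary(dim)
xs = np.linspace(-5, 5, 32)
ys = np.linspace(-5, 5, 32)
desired = np.zeros((len(xs), len(ys)))
for i, x in enumerate(xs):
    for j, y in enumerate(ys):
        if x**2 + y**2 < 4:
            desired[i, j] = 1
region = generate_region_vector(desired, xs, ys, X, Y)
print(np.dot(region.v, encode_point(0, 0, X, Y).v))   # inside the disc: relatively high
print(np.dot(region.v, encode_point(4, 4, X, Y).v))   # outside the disc: near zero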
Example 15
def generate_rectangle_region(x_range, y_range, X, Y):
    # integrating Euler's formula: the integral of e^(ikx) from a to b is i*(e^(ika) - e^(ikb))/k, where e^(ik) is the axis vector and k is its angle
    fft_X = np.fft.fft(X.v)
    fft_Y = np.fft.fft(Y.v)

    phi = np.angle(fft_X)
    gamma = np.angle(fft_Y)
    assert np.allclose(np.abs(fft_X), 1)
    assert np.allclose(np.abs(fft_Y), 1)

    # masks to separate out the components where the angle is 0; there the integrand is a constant, so the integral is simply b - a
    phi_mask = phi != 0
    gamma_mask = gamma != 0

    x_integral = np.zeros_like(fft_X)
    y_integral = np.zeros_like(fft_Y)

    x_integral[phi_mask] = (fft_X ** x_range[1] - fft_X ** x_range[0])[phi_mask] * 1j / phi[phi_mask]
    x_integral[np.logical_not(phi_mask)] = x_range[1]-x_range[0]

    y_integral[gamma_mask] = (fft_Y ** y_range[1] - fft_Y ** y_range[0])[gamma_mask] * 1j / gamma[gamma_mask]
    y_integral[np.logical_not(gamma_mask)] = y_range[1]-y_range[0]

    rectangle = spa.SemanticPointer(np.fft.ifft(x_integral * y_integral))
    return rectangle
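A quick verification sketch (assumes numpy as np, make_good_unitary from this collection, and the encode_point helper used throughout). Depending on the spa implementation, the returned pointer may carry a complex array from the inverse FFT, so the real part is taken below.

# Verification sketch: points inside the rectangle are expected to score higher than points outside.
dim = 512
X = make_good_unitary(dim)
Y = make_good_unitary(dim)
region = generate_rectangle_region((-1., 1.), (0., 2.), X, Y)
inside = encode_point(0.0, 1.0, X, Y)
outside = encode_point(4.0, -3.0, X, Y)
print(np.dot(np.real(region.v), inside.v))
print(np.dot(np.real(region.v), outside.v))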
def create_superposition_vector(vocab, xs, ys, items_per_class_list):
    """generate superposition vector

    :vocab: instance of a nengo_spa vocabulary
    :xs: x coordinates of the positions
    :ys: y corrdinates of the positions
    :items_per_class_list: list of number that indicates how often one
                           particular class occurs in the superposition vector
    :returns: superposition vector as nengo_spa.SemanticPointer object

    """
    lens = np.array([len(xs), len(ys), np.sum(items_per_class_list)])
    if not np.all(lens[0] == lens):
        raise ValueError('input arrays have different lengths')

    num = len(xs)
    sup = spa.SemanticPointer(np.zeros(vocab.dimensions))
    class_ind = 0
    added = np.zeros(len(items_per_class_list))

    for i, (x, y) in enumerate(zip(xs, ys)):
        sup += vocab['C%s'%str(class_ind).zfill(len(str(num)))] * \
            power(vocab['X'], x) * power(vocab['Y'], y)
        added[class_ind] += 1
        if added[class_ind] == items_per_class_list[class_ind]:
            class_ind += 1

    return sup
Example 17
def make_good_unitary_old(D, eps=np.pi * 1e-3, n_trials=10000):
    for _ in range(n_trials):
        d = spa.Vocabulary(D)
        sp = d.create_pointer().unitary()
        a = np.angle(np.fft.fft(sp.v))
        if np.all(np.abs(a) > eps):
            return spa.SemanticPointer(sp.v)
    raise RuntimeError("bleh")
def get_proj_phi(n_proj, phi, angle):
    # get the phis corresponding to an orthogonal N-D axis projection onto a 2-D space
    # TODO: this function can be made much more efficient

    # minimal dimensionality required for this representation
    dim = n_proj * 2 + 1

    # make an axis for each n_proj
    axes_f = np.zeros((n_proj, dim), dtype='complex64')
    axes = np.zeros((
        n_proj,
        dim,
    ))
    axis_sps = []
    for k in range(n_proj):
        axes_f[k, :] = 1
        axes_f[k, k + 1] = np.exp(1.j * phi)
        axes_f[k, -(k + 1)] = np.conj(axes_f[k, k + 1])
        axes[k, :] = np.fft.ifft(axes_f[k, :]).real

        assert np.allclose(np.abs(axes_f[k, :]), 1)
        assert np.allclose(np.fft.fft(axes[k, :]), axes_f[k, :])
        assert np.allclose(np.linalg.norm(axes[k, :]), 1)

        axis_sps.append(spa.SemanticPointer(data=axes[k, :]))

    points_nd = np.zeros((n_proj + 1, n_proj))
    points_nd[:n_proj, :] = np.eye(n_proj) * np.sqrt(n_proj)
    # points in 2D that will correspond to each axis, plus one at zero
    # the zero isn't really necessary, but doesn't hurt
    points_2d = np.zeros((n_proj + 1, 2))
    if n_proj != 2:
        thetas = np.linspace(0, 2 * np.pi, n_proj + 1)[:-1] + angle
    else:  # special case for 2D, don't want polar opposite axes
        thetas = np.linspace(0, np.pi, n_proj + 1)[:-1] + angle

    for i, theta in enumerate(thetas):
        points_2d[i, 0] = np.cos(theta)
        points_2d[i, 1] = np.sin(theta)

    transform_mat = np.linalg.lstsq(points_2d, points_nd)

    # apply scaling to the axes based on the singular values. Both should be the same
    # note: this scaling seems only relevant to matching the effective random scaling, and needed on both sides anyway
    x_axis = transform_mat[0][0, :]  #/ transform_mat[3][0]
    y_axis = transform_mat[0][1, :]  #/ transform_mat[3][1]

    X = power(axis_sps[0], x_axis[0])
    Y = power(axis_sps[0], y_axis[0])
    for i in range(1, n_proj):
        X *= power(axis_sps[i], x_axis[i])
        Y *= power(axis_sps[i], y_axis[i])

    xf = np.fft.fft(X.v)
    yf = np.fft.fft(Y.v)

    return xf[1:1 + n_proj], yf[1:1 + n_proj]
Example 19
    def observation(self, obs):
        img = obs['image']

        M = spa.SemanticPointer(data=np.zeros(self.d))
        for i in range(img.shape[0]):
            for j in range(img.shape[1]):
                obj = img[i, j, 0]
                color = img[i, j, 1]
                state = img[i, j, 2]
                if obj not in [0, 1]:
                    S = spa.SemanticPointer(data=self.S_list[i, j, :])

                    M = M + (S * self.vocab[IDX_TO_OBJECT[obj].upper()] *
                             self.vocab[IDX_TO_COLOR[color].upper()] *
                             self.vocab[IDX_TO_STATE[state].upper()])
        #M = M.normalized()

        return {'mission': obs['mission'], 'image': M.v}
def get_axes(dim=256, n=3, seed=13, period=0, optimal_phi=False):
    """
    Get X and Y axis vectors based on an n dimensional projection.
    If spacing is non-zero, they will be periodic with the given spacing
    :param dim:
    :param n:
    :param seed:
    :param spacing:
    :return:
    """
    rng = np.random.RandomState(seed=seed)

    # # Length of the normal vector to the plane
    # len_normal = np.linalg.norm(np.ones((n,)) * 1./n)
    # # pythagorean theorem to find the length along the axis, assuming in the 2D space the length to the point is one
    # len_axis = np.sqrt(len_normal**2 + 1)

    points_nd = np.eye(n)  #* np.sqrt(n)
    # points in 2D that will correspond to each axis, plus one at zero
    points_2d = np.zeros((n, 2))
    thetas = np.linspace(0, 2 * np.pi, n + 1)[:-1]
    # TODO: will want a scaling here, or along the high dim axes
    for i, theta in enumerate(thetas):
        points_2d[i, 0] = np.cos(theta)
        points_2d[i, 1] = np.sin(theta)

    transform_mat = np.linalg.lstsq(points_2d, points_nd)

    x_axis = transform_mat[0][0, :]
    y_axis = transform_mat[0][1, :]

    axis_sps = []
    for i in range(n):
        if period == 0:
            if optimal_phi:
                # unitary vector with unique phi for every element
                # periodicity depends on the dimensionality
                axis_sps.append(make_optimal_periodic_axis(dim, rng=rng))
            else:
                # random unitary vector
                axis_sps.append(make_good_unitary(dim, rng=rng))
        else:
            # unitary vector of fixed dimensionality and fixed periodicity
            axis_sps.append(
                spa.SemanticPointer(data=make_fixed_dim_periodic_axis(
                    dim=dim, period=period, rng=rng)))

    X = power(axis_sps[0], x_axis[0])
    Y = power(axis_sps[0], y_axis[0])
    for i in range(1, n):
        X *= power(axis_sps[i], x_axis[i])
        Y *= power(axis_sps[i], y_axis[i])

    return X, Y
Example 21
def unitary_by_phi(phis):
    vec_dim = len(phis)*2+1
    vf = np.ones((vec_dim, ), dtype='complex64')
    vf[0] = 1
    vf[1:(vec_dim + 1) // 2] = np.exp(1.j * phis)
    vf[-1:vec_dim // 2:-1] = np.conj(vf[1:(vec_dim + 1) // 2])

    assert np.allclose(np.abs(vf), 1)
    v = np.fft.ifft(vf).real
    assert np.allclose(np.fft.fft(v), vf)
    assert np.allclose(np.linalg.norm(v), 1)
    return spa.SemanticPointer(v)
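A usage sketch (assumes numpy as np): build a 7-dimensional unitary vector from three hand-picked phases and confirm its Fourier coefficients lie on the unit circle.

# Usage sketch: a unitary vector from explicit phases.
v = unitary_by_phi(np.array([0.1, 1.3, -2.0]))
vf = np.fft.fft(v.v)
print(np.allclose(np.abs(vf), 1))   # True: every Fourier coefficient has magnitude 1
print(np.linalg.norm((v * v).v))    # ~1.0: binding a unitary vector with itself preserves norm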
def encode_random(x, y, dim=512, convert_to_sp=False):
    """
    Used for comparison with SSPs. A deterministic random encoding of a location to a semantic pointer
    """
    # convert x and y into a single float
    f = x * 1000 + y
    # convert the float into an unsigned integer to be used as a seed
    seed = struct.unpack('>I', struct.pack('>f', f))[0]
    rstate = np.random.RandomState(seed)
    vec = rstate.normal(size=dim)
    vec = vec / np.linalg.norm(vec)
    if convert_to_sp:
        return spa.SemanticPointer(data=vec)
    else:
        return vec
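A usage sketch (assumes numpy as np and that the struct module is imported where encode_random lives): the encoding is deterministic, but unlike an SSP it has no spatial structure, so nearby points map to unrelated vectors.

# Usage sketch: deterministic, but not smooth over space.
a = encode_random(1.0, 2.0)
b = encode_random(1.0, 2.0)
c = encode_random(1.01, 2.0)
print(np.allclose(a, b))   # True: the same location always gives the same vector
print(np.dot(a, c))        # near 0: a slightly shifted location gives an unrelated vector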
def get_similarity_df(dim, n_comb, operation, seed, vocab):
    """calculate a pandas data frame with similarities for each of
    the dimension over a certain number of samples

    :dim: the vector dimensions to be evaluated
    :n_comb: the number of combinations (items to combine using the operation)
    :operation: 'superpos' or 'binding'
    :seed: random number seed
    :vocab: vector vocabulary
    :returns: a pandas dataframe containing all similarities

    """
    data = []
    d = np.zeros(dim)
    if operation != 'superpos':
        d[0] = 1
    superpos = spa.SemanticPointer(d)

    sims = np.zeros(2 * n_comb)

    for v in np.arange(2 * n_comb):
        if v < n_comb:
            k = 'C%s' % str(v).zfill(len(str(2 * n_comb)))
            if operation == 'superpos':
                superpos += vocab[k]
            else:
                superpos = superpos * vocab[k]

    for v in np.arange(2 * n_comb):
        k = 'C%s' % str(v).zfill(len(str(2 * n_comb)))
        sims[v] = np.abs(superpos.compare(vocab[k]))

        val = 'member'
        if v >= n_comb:
            val = 'no member'

        data.append(
            dict(dimension=dim,
                 similarity=sims[v],
                 combinations=n_comb,
                 operation=operation,
                 val=val,
                 seed=seed,
                 sample=v))

    return pd.DataFrame(data)
Example 24
def random_unitary(n_samples=1000, dim=3, version=2):
    points = np.zeros((n_samples, dim))

    for i in range(n_samples):
        if version == 1:
            sp = nengo_spa.SemanticPointer(data=np.random.randn(dim))
            sp = sp.normalized()
            sp = sp.unitary()
        elif version == 0:
            sp = spa.SemanticPointer(dim)
            sp.make_unitary()
        elif version == 2:
            sp = make_good_unitary(dim=dim)
        else:
            raise NotImplementedError

        points[i, :] = sp.v
    return points
Example 25
def make_good_unitary(dim, eps=1e-3, rng=np.random):
    # created by arvoelke
    a = rng.rand((dim - 1) // 2)
    sign = rng.choice((-1, +1), len(a))
    phi = sign * np.pi * (eps + a * (1 - 2 * eps))
    assert np.all(np.abs(phi) >= np.pi * eps)
    assert np.all(np.abs(phi) <= np.pi * (1 - eps))

    fv = np.zeros(dim, dtype='complex64')
    fv[0] = 1
    fv[1:(dim + 1) // 2] = np.cos(phi) + 1j * np.sin(phi)
    fv[-1:dim // 2:-1] = np.conj(fv[1:(dim + 1) // 2])
    if dim % 2 == 0:
        fv[dim // 2] = 1

    assert np.allclose(np.abs(fv), 1)
    v = np.fft.ifft(fv)
    # assert np.allclose(v.imag, 0, atol=1e-5)
    v = v.real
    assert np.allclose(np.fft.fft(v), fv)
    assert np.allclose(np.linalg.norm(v), 1)
    return spa.SemanticPointer(v).unitary()
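A usage sketch (assumes numpy as np and the power helper defined at the end of this collection): the constructed vector behaves like a 1-D SSP axis, staying unit length under fractional self-binding.

# Usage sketch: fractional binding with a "good" unitary vector.
X = make_good_unitary(128)
print(np.allclose(np.abs(np.fft.fft(X.v)), 1))   # all Fourier magnitudes are 1
print(np.linalg.norm(power(X, 7.3).v))           # ~1.0: norm preserved under fractional powers
print(np.dot(X.v, power(X, 6.0).v))              # small: distant positions are nearly orthogonal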
def make_optimal_periodic_axis(dim=128, eps=1e-3, phase=0, rng=np.random):
    spacing = dim // 2

    phi = np.linspace(0, np.pi, spacing + 1)[1:-1]

    # Randomize the order of the phi, so that different vectors can be created
    rng.shuffle(phi)

    assert np.all(np.abs(phi) >= np.pi * eps)
    assert np.all(np.abs(phi) <= np.pi * (1 - eps))

    fv = np.zeros(dim, dtype='complex64')
    fv[0] = 1
    fv[1:(dim + 1) // 2] = np.cos(phi + phase) + 1j * np.sin(phi + phase)
    fv[-1:dim // 2:-1] = np.conj(fv[1:(dim + 1) // 2])
    if dim % 2 == 0:
        fv[dim // 2] = 1

    assert np.allclose(np.abs(fv), 1)
    v = np.fft.ifft(fv)
    v = v.real
    assert np.allclose(np.fft.fft(v), fv)
    assert np.allclose(np.linalg.norm(v), 1)
    return spa.SemanticPointer(v)
Example 27
coarse_xs = np.linspace(xs[0], xs[-1], coarse_size)
coarse_ys = np.linspace(ys[0], ys[-1], coarse_size)

map_array = coarse_mazes[args.maze_index, :, :]
limit_low = 0
limit_high = 13
encoding_func, repr_dim = get_encoding_function(args,
                                                limit_low=limit_low,
                                                limit_high=limit_high)

# x_axis_sp = spa.SemanticPointer(data=data['x_axis_sp'])
# y_axis_sp = spa.SemanticPointer(data=data['y_axis_sp'])
x_axis_vec = encoding_func(1, 0)
y_axis_vec = encoding_func(0, 1)
x_axis_sp = nengo_spa.SemanticPointer(data=x_axis_vec)
y_axis_sp = nengo_spa.SemanticPointer(data=y_axis_vec)
heatmap_vectors = get_heatmap_vectors(xs, ys, x_axis_sp, y_axis_sp)
coarse_heatmap_vectors = get_heatmap_vectors(coarse_xs, coarse_ys, x_axis_sp,
                                             y_axis_sp)

# fixed random set of locations for the goals
limit_range = xs[-1] - xs[0]

goal_sps = data['goal_sps']
goals = data['goals']
# print(np.min(goals))
# print(np.max(goals))
goals_scaled = ((goals - xs[0]) / limit_range) * coarse_size
# print(np.min(goals_scaled))
# print(np.max(goals_scaled))
        "Lake": locs[5, :],
        "Bush": locs[6, :]
    })

randomize = False
# item_vocab = spa.Vocabulary(args.dim, randomize=randomize)
item_vocab = spa.Vocabulary(args.dim)

item_vocab.populate(';'.join(list(items.keys())))

limit = 5
res = 256
xs = np.linspace(-limit, limit, res)
ys = np.linspace(-limit, limit, res)

mem_sp = spa.SemanticPointer(data=np.zeros((args.dim, )))
for key, value in items.items():
    # mem_sp += item_vocab[key] * encode_point_hex(value[0], value[1], X, Y, Z)
    mem_sp += item_vocab[key] * encode_point(value[0], value[1], X, Y)


def encode_func(pos):
    # return encode_point_hex(pos[0], pos[1], X, Y, Z).v
    return encode_point(pos[0], pos[1], X, Y).v


# xs = np.linspace(-1, args.env_size+1, 256)
if not os.path.exists('hmv_attractor_exp_{}.npz'.format(args.dim)):
    # hmv = get_heatmap_vectors_hex(xs, xs, X, Y, Z)
    hmv = get_heatmap_vectors(xs, xs, X, Y)
    np.savez('hmv_attractor_exp_{}.npz'.format(args.dim), hmv=hmv)
Example 29
def experiment(dim=512,
               n_hierarchy=3,
               n_items=16,
               seed=0,
               limit=5,
               res=128,
               thresh=0.5,
               neural=False,
               neurons_per_dim=25,
               time_per_item=1.0,
               max_items=100):
    rng = np.random.RandomState(seed=seed)

    X, Y = get_fixed_dim_sub_toriod_axes(
        dim=dim,
        n_proj=3,
        scale_ratio=0,
        scale_start_index=0,
        rng=rng,
        eps=0.001,
    )

    xs = np.linspace(-limit, limit, res)
    ys = np.linspace(-limit, limit, res)
    hmv = get_heatmap_vectors(xs, ys, X, Y)

    item_vecs = rng.normal(size=(n_items, dim))
    for i in range(n_items):
        item_vecs[i, :] = item_vecs[i, :] / np.linalg.norm(item_vecs[i, :])

    locations = rng.uniform(low=-limit, high=limit, size=(n_items, 2))

    if n_hierarchy == 1:  # no hierarchy case

        # Encode items into memory
        mem = np.zeros((dim, ))
        for i in range(n_items):
            mem += (spa.SemanticPointer(data=item_vecs[i, :]) *
                    encode_point(locations[i, 0], locations[i, 1], X, Y)).v
        mem /= np.linalg.norm(mem)

        mem_sp = spa.SemanticPointer(data=mem)

        estims = np.zeros((
            n_items,
            dim,
        ))
        sims = np.zeros((n_items, ))
        if neural:
            # save time for very large numbers of items
            n_exp_items = min(n_items, max_items)
            estims = np.zeros((
                n_exp_items,
                dim,
            ))
            sims = np.zeros((n_exp_items, ))

            model = nengo.Network(seed=seed)
            with model:
                input_node = nengo.Node(
                    lambda t: item_vecs[int(np.floor(t)) % n_items, :],
                    size_in=0,
                    size_out=dim)
                mem_node = nengo.Node(mem, size_in=0, size_out=dim)

                cconv = nengo.networks.CircularConvolution(
                    n_neurons=neurons_per_dim, dimensions=dim, invert_b=True)

                nengo.Connection(mem_node, cconv.input_a)
                nengo.Connection(input_node, cconv.input_b)

                out_node = nengo.Node(size_in=dim, size_out=0)

                nengo.Connection(cconv.output, out_node)

                p_out = nengo.Probe(out_node, synapse=0.01)

            sim = nengo.Simulator(model)
            sim.run(n_exp_items * time_per_item)

            output_data = sim.data[p_out]
            timesteps_per_item = int(time_per_item / 0.001)

            # timestep offset to cancel transients
            offset = 100
            for i in range(n_exp_items):
                estims[i, :] = output_data[i * timesteps_per_item +
                                           offset:(i + 1) *
                                           timesteps_per_item, :].mean(axis=0)
                sims[i] = np.dot(
                    estims[i, :],
                    encode_point(locations[i, 0], locations[i, 1], X, Y).v)

            pred_locs = ssp_to_loc_v(estims, hmv, xs, ys)

            errors = np.linalg.norm(pred_locs - locations[:n_exp_items, :],
                                    axis=1)

            accuracy = len(np.where(errors < thresh)[0]) / n_exp_items

            rmse = np.sqrt(np.mean(errors**2))

            sim = np.mean(sims)
        else:
            # retrieve items
            for i in range(n_items):
                estims[i, :] = (mem_sp *
                                ~spa.SemanticPointer(data=item_vecs[i, :])).v

                sims[i] = np.dot(
                    estims[i, :],
                    encode_point(locations[i, 0], locations[i, 1], X, Y).v)

            pred_locs = ssp_to_loc_v(estims, hmv, xs, ys)

            errors = np.linalg.norm(pred_locs - locations, axis=1)

            accuracy = len(np.where(errors < thresh)[0]) / n_items

            rmse = np.sqrt(np.mean(errors**2))

            sim = np.mean(sims)

    elif n_hierarchy == 2:
        # TODO: generate vocab and input sequences

        n_ids = int(np.sqrt(n_items))
        f_n_ids = np.sqrt(n_items)

        id_vecs = rng.normal(size=(n_ids, dim))
        for i in range(n_ids):
            id_vecs[i, :] = id_vecs[i, :] / np.linalg.norm(id_vecs[i, :])

        # items to be included in each ID vec
        item_sums = np.zeros((n_ids, dim))
        item_loc_sums = np.zeros((n_ids, dim))
        for i in range(n_items):
            id_ind = min(i // n_ids, n_ids - 1)
            # id_ind = min(int(i / f_n_ids), n_ids - 1)
            item_sums[id_ind, :] += item_vecs[i, :]
            item_loc_sums[id_ind, :] += (
                spa.SemanticPointer(data=item_vecs[i, :]) *
                encode_point(locations[i, 0], locations[i, 1], X, Y)).v

        # Encode id_vecs into memory, each id is bound to something that has similarity to all items in the ID's map
        mem = np.zeros((dim, ))
        for i in range(n_ids):
            # normalize previous memories
            item_sums[i, :] = item_sums[i, :] / np.linalg.norm(item_sums[i, :])
            item_loc_sums[i, :] = item_loc_sums[i, :] / np.linalg.norm(
                item_loc_sums[i, :])

            mem += (spa.SemanticPointer(data=id_vecs[i, :]) *
                    spa.SemanticPointer(data=item_sums[i, :])).v
        mem /= np.linalg.norm(mem)

        mem_sp = spa.SemanticPointer(data=mem)

        estims = np.zeros((
            n_items,
            dim,
        ))
        sims = np.zeros((n_items, ))

        # retrieve items
        for i in range(n_items):
            # noisy ID for the map with this item
            estim_id = (mem_sp * ~spa.SemanticPointer(data=item_vecs[i, :])).v

            # get closest clean match
            id_sims = np.zeros((n_ids, ))
            for j in range(n_ids):
                id_sims[j] = np.dot(estim_id, id_vecs[j, :])

            best_ind = np.argmax(id_sims)

            # clean_id = id_vecs[best_ind, :]

            # item_loc_sums comes from the associative mapping from clean_id

            estims[i, :] = (
                spa.SemanticPointer(data=item_loc_sums[best_ind, :]) *
                ~spa.SemanticPointer(data=item_vecs[i, :])).v

            sims[i] = np.dot(
                estims[i, :],
                encode_point(locations[i, 0], locations[i, 1], X, Y).v)

        pred_locs = ssp_to_loc_v(estims, hmv, xs, ys)

        errors = np.linalg.norm(pred_locs - locations, axis=1)

        accuracy = len(np.where(errors < thresh)[0]) / n_items

        rmse = np.sqrt(np.mean(errors**2))

        sim = np.mean(sims)

    elif n_hierarchy == 3:
        # n_ids = int(np.cbrt(n_items))
        f_n_ids = np.cbrt(n_items)
        n_ids = int(np.ceil(np.cbrt(n_items)))
        n_ids_inner = int(np.ceil(np.sqrt(n_items / n_ids)))
        # f_n_ids = np.cbrt(n_items)

        id_outer_vecs = rng.normal(size=(n_ids, dim))
        id_inner_vecs = rng.normal(size=(n_ids_inner, dim))
        for i in range(n_ids):
            id_outer_vecs[i, :] = id_outer_vecs[i, :] / np.linalg.norm(
                id_outer_vecs[i, :])
            # for j in range(n_ids):
            #     id_inner_vecs[i*n_ids+j, :] = id_inner_vecs[i*n_ids+j, :] / np.linalg.norm(id_inner_vecs[i*n_ids+j, :])
        for i in range(n_ids_inner):
            id_inner_vecs[i, :] = id_inner_vecs[i, :] / np.linalg.norm(
                id_inner_vecs[i, :])

        # items to be included in each ID vec
        item_outer_sums = np.zeros((n_ids, dim))
        # item_inner_sums = np.zeros((n_ids*n_ids, dim))
        item_inner_sums = np.zeros((n_ids_inner, dim))
        item_loc_outer_sums = np.zeros((n_ids, dim))
        # item_loc_inner_sums = np.zeros((n_ids*n_ids, dim))
        item_loc_inner_sums = np.zeros((n_ids_inner, dim))
        for i in range(n_items):

            id_outer_ind = min(int(i / (f_n_ids * f_n_ids)), n_ids - 1)
            id_inner_ind = min(int(i / f_n_ids), n_ids_inner - 1)

            item_outer_sums[id_outer_ind, :] += item_vecs[i, :]
            item_inner_sums[id_inner_ind, :] += item_vecs[i, :]

            item_loc_outer_sums[id_outer_ind, :] += (
                spa.SemanticPointer(data=item_vecs[i, :]) *
                encode_point(locations[i, 0], locations[i, 1], X, Y)).v
            item_loc_inner_sums[id_inner_ind, :] += (
                spa.SemanticPointer(data=item_vecs[i, :]) *
                encode_point(locations[i, 0], locations[i, 1], X, Y)).v

        # Encode id_vecs into memory, each id is bound to something that has similarity to all items in the ID's map
        mem_outer = np.zeros((dim, ))
        mem_inner = np.zeros((
            n_ids,
            dim,
        ))
        for i in range(n_ids):
            # normalize previous memories
            item_outer_sums[i, :] = item_outer_sums[i, :] / np.linalg.norm(
                item_outer_sums[i, :])
            item_loc_outer_sums[i, :] = item_loc_outer_sums[
                i, :] / np.linalg.norm(item_loc_outer_sums[i, :])

            mem_outer += (spa.SemanticPointer(data=id_outer_vecs[i, :]) *
                          spa.SemanticPointer(data=item_outer_sums[i, :])).v

        for j in range(n_ids_inner):
            # normalize previous memories
            item_inner_sums[j, :] = item_inner_sums[j, :] / np.linalg.norm(
                item_inner_sums[j, :])
            item_loc_inner_sums[j, :] = item_loc_inner_sums[
                j, :] / np.linalg.norm(item_loc_inner_sums[j, :])

            i = min(int(j / n_ids), n_ids - 1)

            mem_inner[i, :] += (
                spa.SemanticPointer(data=id_inner_vecs[j, :]) *
                spa.SemanticPointer(data=item_inner_sums[j, :])).v

            mem_inner[i, :] /= np.linalg.norm(mem_inner[i, :])
        mem_outer /= np.linalg.norm(mem_outer)

        mem_outer_sp = spa.SemanticPointer(data=mem_outer)

        estims = np.zeros((
            n_items,
            dim,
        ))
        sims = np.zeros((n_items, ))

        if neural:
            # time for each item, in seconds
            time_per_item = 1.0
            model = nengo.Network(seed=seed)
            with model:
                inp_node = nengo.Node('?', size_in=0, size_out=dim)

                estim_outer_id = nengo.Ensemble(n_neurons=dim * neurons_per_dim,
                                                dimensions=dim)

                out_node = nengo.Node(size_in=dim, size_out=0)

                p_out = nengo.Probe(out_node, synapse=0.01)

            sim = nengo.Simulator(model)
            sim.run(n_items * time_per_item)
        else:
            # non-neural version

            # retrieve items
            for i in range(n_items):
                # noisy outer ID for the map with this item
                estim_outer_id = (mem_outer_sp *
                                  ~spa.SemanticPointer(data=item_vecs[i, :])).v

                # get closest clean match
                id_sims = np.zeros((n_ids))
                for j in range(n_ids):
                    id_sims[j] = np.dot(estim_outer_id, id_outer_vecs[j, :])

                best_ind = np.argmax(id_sims)

                # noisy inner ID for the map with this item
                estim_inner_id = (
                    spa.SemanticPointer(data=mem_inner[best_ind, :]) *
                    ~spa.SemanticPointer(data=item_vecs[i, :])).v

                # get closest clean match
                id_sims = np.zeros((n_ids_inner))
                for j in range(n_ids_inner):
                    id_sims[j] = np.dot(estim_inner_id, id_inner_vecs[j, :])

                best_ind = np.argmax(id_sims)

                # item_loc_sums comes from the associative mapping from clean_id

                estims[i, :] = (spa.SemanticPointer(
                    data=item_loc_inner_sums[best_ind, :]) *
                                ~spa.SemanticPointer(data=item_vecs[i, :])).v

                sims[i] = np.dot(
                    estims[i, :],
                    encode_point(locations[i, 0], locations[i, 1], X, Y).v)

        pred_locs = ssp_to_loc_v(estims, hmv, xs, ys)

        errors = np.linalg.norm(pred_locs - locations, axis=1)

        accuracy = len(np.where(errors < thresh)[0]) / n_items

        rmse = np.sqrt(np.mean(errors**2))

        sim = np.mean(sims)
    else:
        # 4 split hierarchy

        vocab = spa.Vocabulary(dimensions=dim,
                               pointer_gen=np.random.RandomState(seed=seed))
        filler_id_keys = []
        filler_keys = []
        mapping = {}

        items_left = n_items
        n_levels = 0
        while items_left > 1:
            n_levels += 1
            items_left /= 4

        print(n_levels)

        # Location Values, labelled SSP
        for i in range(n_items):
            # vocab.populate('Item{}'.format(i))
            vocab.add('Loc{}'.format(i),
                      encode_point(locations[i, 0], locations[i, 1], X, Y).v)

        # level IDs, e.g. CITY, PROVINCE, COUNTRY
        for i in range(n_levels):
            vocab.populate('LevelSlot{}.unitary()'.format(i))
            # sp = spa.SemanticPointer()

        # Item IDs, e.g. Waterloo_ID
        for i in range(n_items):
            vocab.populate('ItemID{}.unitary()'.format(i))

        # level labels (fillers for level ID slots), e.g. Waterloo_ID, Ontario_ID, Canada_ID
        for i in range(n_levels):
            for j in range(int(n_items / (4**(n_levels - i - 1)))):
                vocab.populate('LevelFillerID{}_{}.unitary()'.format(i, j))
                # filler_id_keys.append('LevelFillerID{}_{}'.format(i, j))
                # filler_keys.append('LevelFiller{}_{}'.format(i, j))
                # mapping['LevelFillerID{}_{}'.format(i, j)] = 'LevelFiller{}_{}'.format(i, j)

        # Second last level with item*location pairs
        for i in range(int(n_items / 4)):
            id_str = []
            for k in range(n_levels - 1):
                id_str.append('LevelSlot{} * LevelFillerID{}_{}'.format(
                    k, k, int(i * 4 / (4**(n_levels - k - 1)))))

            data_str = []
            for j in range(4):
                ind = i * 4 + j
                data_str.append('ItemID{}*Loc{}'.format(ind, ind))
                vocab.populate('Item{} = ({}).normalized()'.format(
                    # i, ' + '.join(id_str + ['LevelSlot{} * LevelFillerID{}_{}'.format(n_levels - 2, n_levels - 2, j)])
                    ind,
                    ' + '.join(id_str + [
                        'LevelSlot{} * LevelFillerID{}_{}'.format(
                            n_levels - 1, n_levels - 1, j)
                    ])))

            # vocab.populate('LevelFiller{}_{} = {}'.format(n_levels - 1, i, ' + '.join(data_str)))
            vocab.populate('LevelFiller{}_{} = ({}).normalized()'.format(
                n_levels - 2, i, ' + '.join(data_str)))

            # only appending the ones used
            filler_id_keys.append('LevelFillerID{}_{}'.format(n_levels - 2, i))
            filler_keys.append('LevelFiller{}_{}'.format(n_levels - 2, i))
            mapping['LevelFillerID{}_{}'.format(
                n_levels - 2, i)] = 'LevelFiller{}_{}'.format(n_levels - 2, i)

        print(sorted(list(vocab.keys())))

        # Given each ItemID, calculate the corresponding Loc
        # Can map from ItemID{X} -> Item{X}
        # Query based on second last levelID to get the appropriate LevelFillerID
        # map from LevelFillerID -> LevelFiller
        # do the query LevelFiller *~ ItemID{X} to get Loc{X}

        possible_level_filler_id_vecs = np.zeros((int(n_items / 4), dim))
        for i in range(int(n_items / 4)):
            possible_level_filler_id_vecs[i] = vocab[
                'LevelFillerID{}_{}'.format(n_levels - 2, i)].v

        estims = np.zeros((
            n_items,
            dim,
        ))
        sims = np.zeros((n_items, ))

        if neural:
            # save time for very large numbers of items
            n_exp_items = min(n_items, max_items)
            estims = np.zeros((
                n_exp_items,
                dim,
            ))
            sims = np.zeros((n_exp_items, ))

            filler_id_vocab = vocab.create_subset(keys=filler_id_keys)
            filler_vocab = vocab.create_subset(keys=filler_keys)
            filler_all_vocab = vocab.create_subset(keys=filler_keys +
                                                   filler_id_keys)

            model = nengo.Network(seed=seed)
            with model:
                # The changing item query. Full expanded item, not just ID
                item_input_node = nengo.Node(lambda t: vocab['Item{}'.format(
                    int(np.floor(t)) % n_items)].v,
                                             size_in=0,
                                             size_out=dim)
                # item_input_node = spa.Transcode(lambda t: 'Item{}'.format(int(np.floor(t))), output_vocab=vocab)

                # The ID for the changing item query
                item_id_input_node = nengo.Node(lambda t: vocab[
                    'ItemID{}'.format(int(np.floor(t)) % n_items)].v,
                                                size_in=0,
                                                size_out=dim)
                # item_id_input_node = spa.Transcode(lambda t: 'ItemID{}'.format(int(np.floor(t))), output_vocab=vocab)

                # Fixed memory based on the level slot to access
                level_slot_input_node = nengo.Node(
                    lambda t: vocab['LevelSlot{}'.format(n_levels - 2)].v,
                    size_in=0,
                    size_out=dim)

                model.cconv_noisy_level_filler = nengo.networks.CircularConvolution(
                    n_neurons=neurons_per_dim * 2,
                    dimensions=dim,
                    invert_b=True)

                nengo.Connection(item_input_node,
                                 model.cconv_noisy_level_filler.input_a)
                nengo.Connection(level_slot_input_node,
                                 model.cconv_noisy_level_filler.input_b)

                # Note: this is set up as heteroassociative between ID and the content (should clean up as well)
                model.noisy_level_filler_id_cleanup = spa.ThresholdingAssocMem(
                    threshold=0.4,
                    input_vocab=filler_id_vocab,
                    output_vocab=filler_vocab,
                    # mapping=vocab.keys(),
                    mapping=mapping,
                    function=lambda x: x > 0.)

                nengo.Connection(model.cconv_noisy_level_filler.output,
                                 model.noisy_level_filler_id_cleanup.input)

                model.cconv_location = nengo.networks.CircularConvolution(
                    n_neurons=neurons_per_dim * 2,
                    dimensions=dim,
                    invert_b=True)

                nengo.Connection(model.noisy_level_filler_id_cleanup.output,
                                 model.cconv_location.input_a)
                nengo.Connection(item_id_input_node,
                                 model.cconv_location.input_b)

                out_node = nengo.Node(size_in=dim, size_out=0)

                nengo.Connection(model.cconv_location.output, out_node)

                p_out = nengo.Probe(out_node, synapse=0.01)

            sim = nengo.Simulator(model)
            sim.run(n_exp_items * time_per_item)

            output_data = sim.data[p_out]
            timesteps_per_item = int(time_per_item / 0.001)

            # timestep offset to cancel transients
            offset = 100
            for i in range(n_exp_items):
                estims[i, :] = output_data[i * timesteps_per_item +
                                           offset:(i + 1) *
                                           timesteps_per_item, :].mean(axis=0)
                sims[i] = np.dot(
                    estims[i, :],
                    encode_point(locations[i, 0], locations[i, 1], X, Y).v)

            pred_locs = ssp_to_loc_v(estims, hmv, xs, ys)

            errors = np.linalg.norm(pred_locs - locations[:n_exp_items, :],
                                    axis=1)

            accuracy = len(np.where(errors < thresh)[0]) / n_exp_items

            rmse = np.sqrt(np.mean(errors**2))

            sim = np.mean(sims)
        else:
            # non-neural version

            # retrieve items
            for i in range(n_items):
                noisy_level_filler_id = vocab['Item{}'.format(
                    i)] * ~vocab['LevelSlot{}'.format(n_levels - 2)]
                # cleanup filler id
                n_fillers = int(n_items / 4)
                sim = np.zeros((n_fillers, ))
                for j in range(n_fillers):
                    sim[j] = np.dot(noisy_level_filler_id.v,
                                    possible_level_filler_id_vecs[j, :])

                filler_id_ind = np.argmax(sim)

                # query the appropriate filler
                loc_estim = vocab['LevelFiller{}_{}'.format(
                    n_levels - 2,
                    filler_id_ind)] * ~vocab['ItemID{}'.format(i)]

                estims[i, :] = loc_estim.v

                sims[i] = np.dot(
                    estims[i, :],
                    encode_point(locations[i, 0], locations[i, 1], X, Y).v)

            pred_locs = ssp_to_loc_v(estims, hmv, xs, ys)

            errors = np.linalg.norm(pred_locs - locations, axis=1)

            accuracy = len(np.where(errors < thresh)[0]) / n_items

            rmse = np.sqrt(np.mean(errors**2))

            sim = np.mean(sims)

    return rmse, accuracy, sim
def power(s, e):
    x = np.fft.ifft(np.fft.fft(s.v)**e).real
    return spa.SemanticPointer(data=x)
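A usage sketch (assumes numpy as np and make_good_unitary from this collection): fractional exponents compose additively under binding, which is what lets these pointers represent continuous coordinates.

# Usage sketch: power(X, a) * power(X, b) matches power(X, a + b) for a unitary axis vector.
X = make_good_unitary(256)
lhs = (power(X, 1.2) * power(X, 2.3)).v
rhs = power(X, 3.5).v
print(np.allclose(lhs, rhs))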