Example #1
def ssp_enc_func(coords, dim=256, seed=13):
    rng = np.random.RandomState(seed)
    x_axis_sp = make_good_unitary(dim, rng=rng)
    y_axis_sp = make_good_unitary(dim, rng=rng)
    return encode_point(
        x=coords[0], y=coords[1], x_axis_sp=x_axis_sp, y_axis_sp=y_axis_sp,
    ).v
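A minimal usage sketch for the encoder above (hedged: it assumes numpy is imported as np and that make_good_unitary and encode_point come from spatial_semantic_pointers.utils, as in the surrounding examples):

import numpy as np
from spatial_semantic_pointers.utils import make_good_unitary, encode_point

vec = ssp_enc_func(np.array([1.0, 2.0]))
print(vec.shape)  # (256,): .v is the raw numpy vector of the encoded point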
Example #2
def test_generate_maze_sp(size=10,
                          limit_low=-5,
                          limit_high=5,
                          res=64,
                          dim=512,
                          seed=13):
    from spatial_semantic_pointers.utils import make_good_unitary, get_heatmap_vectors
    from spatial_semantic_pointers.plots import plot_heatmap

    rng = np.random.RandomState(seed=seed)

    x_axis_sp = make_good_unitary(dim=dim, rng=rng)
    y_axis_sp = make_good_unitary(dim=dim, rng=rng)

    xs = np.linspace(limit_low, limit_high, res)
    ys = np.linspace(limit_low, limit_high, res)

    sp, maze, fine_maze = generate_maze_sp(size,
                                           xs,
                                           ys,
                                           x_axis_sp,
                                           y_axis_sp,
                                           normalize=True,
                                           obstacle_ratio=.2,
                                           map_style='blocks')

    fig, ax = plt.subplots(1, 4)

    ax[0].imshow(maze)
    ax[1].imshow(fine_maze)
    heatmap_vectors = get_heatmap_vectors(xs, ys, x_axis_sp, y_axis_sp)
    plot_heatmap(sp.v,
                 heatmap_vectors,
                 ax[2],
                 xs,
                 ys,
                 name='',
                 vmin=-1,
                 vmax=1,
                 cmap='plasma',
                 invert=True)
    plot_heatmap(sp.v,
                 heatmap_vectors,
                 ax[3],
                 xs,
                 ys,
                 name='',
                 vmin=None,
                 vmax=None,
                 cmap='plasma',
                 invert=True)

    plt.show()
Example #3
def debug():
    seed = 14
    dim = 256
    # dim = 1024
    # res = 256
    res = 32

    rng = np.random.RandomState(seed=seed)

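    # NOTE: X and Y are assigned several times below and each assignment
    # overwrites the previous one; this is a scratch/debug function, so
    # comment blocks in or out to choose which axis vectors to inspect.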
    X, Y = get_fixed_dim_sub_toriod_axes(
        dim=dim,
        n_proj=3,
        scale_ratio=0,
        scale_start_index=0,
        rng=rng,
        eps=0.001,
    )

    X, Y = get_axes_from_network(fname='learned_ssp_models/model_1layer_1024hs_seed1.pt', dim=dim)

    # X, Y = get_axes(dim=dim, n=3, seed=13, period=0, optimal_phi=False)

    X = make_good_unitary(dim, rng=rng)
    Y = make_good_unitary(dim, rng=rng)

    # X = make_fixed_dim_periodic_axis(
    #     dim=dim, period=6, phase=0, frequency=1, eps=1e-3, rng=rng, flip=False, random_phases=False,
    # )
    # Y = make_fixed_dim_periodic_axis(
    #     dim=dim, period=6, phase=0, frequency=1, eps=1e-3, rng=rng, flip=False, random_phases=False,
    # )

    md, phi_pos = phi_mag_and_dir(X.v, Y.v)
    # md, phi_pos = phi_mag_and_dir(X, Y)

    fig, ax = plt.subplots(1, 5, figsize=(10, 3))

    magnitude_histogram(md, ax[0])
    direction_histogram(md, ax[1])

    mag_dir_histogram(md, ax[2], res=res)
    mag_dir_histogram(phi_pos, ax[3], res=res)
    gauss_image(phi_pos, ax[4], sigma=.15)

    print("Mean Location: ({},{})".format(
        np.round(np.mean(phi_pos[:, 0]), 2), np.round(np.mean(phi_pos[:, 1]), 2)
    ))

    plt.show()
Example #4
def ssp_encoding_func(seed=13, dim=512, ssp_scaling=1):
    rng = np.random.RandomState(seed=seed)

    x_axis_sp = make_good_unitary(dim=dim, rng=rng)
    y_axis_sp = make_good_unitary(dim=dim, rng=rng)

    def encoding_func(positions):
        return encode_point(
            x=positions[0]*ssp_scaling,
            y=positions[1]*ssp_scaling,
            x_axis_sp=x_axis_sp,
            y_axis_sp=y_axis_sp
        ).v

    return encoding_func
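The returned closure encodes any (x, y) pair with the fixed axis vectors. A hedged usage sketch (assuming the same imports as above):

enc = ssp_encoding_func(seed=13, dim=512, ssp_scaling=0.5)
ssp_vec = enc((1.0, -2.0))  # (512,) numpy array for the scaled point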
Example #5
def encode_dataset(data, dim=256, seed=13, scale=1.0):
    """
    :param data: the data to be encoded
    :param dim: dimensionality of the SSP
    :param seed: seed for the single axis vector
    :param scale: scaling of the data for the encoding
    :return:
    """
    rng = np.random.RandomState(seed=seed)
    # TODO: have option to normalize everything first, for consistent relative scale
    axis_vec = make_good_unitary(dim, rng=rng)

    n_samples = data.shape[0]
    n_features = data.shape[1]

    n_out_features = n_features * dim

    data_out = np.zeros((n_samples, n_out_features))

    for s in range(n_samples):
        for f in range(n_features):
            data_out[s, f * dim:(f + 1) * dim] = power(axis_vec,
                                                       data[s, f] * scale).v

    return data_out
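Each feature lands in its own dim-sized block, so the output has n_features * dim columns. An illustrative sketch (assumes numpy and the library imports as above):

data = np.random.uniform(-1, 1, size=(10, 3))
encoded = encode_dataset(data, dim=256, seed=13, scale=1.0)
print(encoded.shape)  # (10, 768): three 256-dimensional blocks per sample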
Example #6
def get_nd_simplex_encoding_func(n, dim, seed=13):

    transform_axes = get_simplex_coordinates(n)

    rng = np.random.RandomState(seed=seed)

    axis_vectors = []
    for i in range(n + 1):
        axis_vectors.append(make_good_unitary(dim, rng=rng))

    def encoding_func(features):
        """
        Take in 'n' features as a numpy array, and output a 'dim' dimensional SSP
        """
        # TODO: any scaling required?
        # TODO: make sure matrix multiply order is correct
        transformed_features = transform_axes @ features

        vec = power(axis_vectors[0], transformed_features[0])
        for i in range(1, n + 1):
            vec *= power(axis_vectors[i], transformed_features[i])

        return vec.v

    return encoding_func
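A hedged usage sketch for the simplex encoder (it assumes get_simplex_coordinates returns an (n+1, n) matrix, which the loop over n + 1 axis vectors implies):

enc = get_nd_simplex_encoding_func(n=3, dim=256, seed=13)
vec = enc(np.array([0.2, -1.0, 0.5]))  # (256,) SSP built from n + 1 axis vectors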
Example #7
def random_unitary(n_samples=1000, dim=3, version=1, eps=0.001):
    points = np.zeros((n_samples, dim))
    good = np.zeros((n_samples, ))

    for i in range(n_samples):
        if version == 1:
            sp = nengo_spa.SemanticPointer(data=np.random.randn(dim))
            sp = sp.normalized()
            sp = sp.unitary()
        elif version == 0:
            sp = spa.SemanticPointer(dim)
            sp.make_unitary()
        elif version == 2:
            sp = make_good_unitary(dim=dim)
        else:
            raise NotImplementedError

        points[i, :] = sp.v
        pf = np.fft.fft(points[i, :])
        if dim % 2 == 0:
            if np.abs(pf[0] - 1) < eps and np.abs(pf[dim // 2] - 1) < eps:
                good[i] = 1
        else:
            if np.abs(pf[0] - 1) < eps:
                good[i] = 1
    return points, good
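The good flags mark vectors whose FFT has a +1 DC coefficient (and +1 Nyquist coefficient for even dim), which make_good_unitary enforces by construction. An illustrative check (version=2 only needs make_good_unitary):

pts, good = random_unitary(n_samples=100, dim=7, version=2)
print(good.mean())  # fraction of samples passing the spectral check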
Example #8
def unitary_points(dim, n_samples, good_unitary=False):
    points = np.zeros((n_samples, dim))
    for i in range(n_samples):
        if good_unitary:
            points[i, :] = make_good_unitary(dim=dim).v
        else:
            sp = spa.SemanticPointer(dim)
            sp.make_unitary()
            points[i, :] = sp.v
    return points
Example #9
def get_ssp_encoding_func(dim, scale, seed, **_):

    rng = np.random.RandomState(seed=seed)

    axis_vec = make_good_unitary(dim, rng=rng)

    def encoding_func(feature):
        return power(axis_vec, feature * scale).v

    return encoding_func
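This is the one-dimensional analogue of the 2D encoders above: a single axis vector raised to a scaled fractional power. Hedged usage sketch:

enc = get_ssp_encoding_func(dim=256, scale=0.5, seed=13)
vec = enc(3.2)  # (256,) encoding of the scalar feature 3.2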
Example #10
def unitary_determinant_test(dim=2, n_samples=1000, good_unitary=True):
    for i in range(n_samples):
        if good_unitary:
            vec = make_good_unitary(dim=dim).v
        else:
            sp = spa.SemanticPointer(dim)
            sp.make_unitary()
            vec = sp.v
        if not np.allclose(np.dot(circulant(vec)[0], circulant(vec)[1]), 0):
            print(np.dot(circulant(vec)[0], circulant(vec)[1]))
            print(np.linalg.det(circulant(vec)))
        # assert np.allclose(np.dot(circulant(vec)[0], circulant(vec)[1]), 0)
        assert np.allclose(np.abs(np.linalg.det(circulant(vec))), 1)
    return True
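The property under test: if vec is unitary (all FFT coefficients have unit magnitude), circulant(vec) is an orthogonal matrix, so its determinant is +/-1. A self-contained sketch of that fact (assumes only numpy, scipy, and make_good_unitary as imported elsewhere in these examples):

import numpy as np
from scipy.linalg import circulant

vec = make_good_unitary(dim=8).v
C = circulant(vec)
assert np.allclose(C @ C.T, np.eye(8))           # rows are orthonormal
assert np.allclose(np.abs(np.linalg.det(C)), 1)  # determinant magnitude 1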
Example #11
def get_heatmap_vectors_n(xs, ys, n, seed=13, dim=512):
    """
    Precompute spatial semantic pointers for every location in the linspace
    Used to quickly compute heat maps by a simple vectorized dot product (matrix multiplication)
    """
    rng = np.random.RandomState(seed=seed)
    axis_sps = []
    for i in range(n):
        axis_sps.append(make_good_unitary(dim, rng=rng))

    vectors = np.zeros((len(xs), len(ys), dim))

    N = len(axis_sps)

    # points_nd = np.zeros((N + 1, N))
    # points_nd[:N, :] = np.eye(N)
    # # points in 2D that will correspond to each axis, plus one at zero
    # points_2d = np.zeros((N + 1, 2))

    points_nd = np.eye(N) * np.sqrt(N)
    # points in 2D that will correspond to each axis
    points_2d = np.zeros((N, 2))

    thetas = np.linspace(0, 2 * np.pi, N + 1)[:-1]
    # TODO: will want a scaling here, or along the high dim axes
    for i, theta in enumerate(thetas):
        points_2d[i, 0] = np.cos(theta)
        points_2d[i, 1] = np.sin(theta)

    transform_mat = np.linalg.lstsq(points_2d, points_nd, rcond=None)

    # apply scaling to the axes based on the singular values. Both should be the same
    x_axis = transform_mat[0][0, :] / transform_mat[3][0]
    y_axis = transform_mat[0][1, :] / transform_mat[3][1]

    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            # Note: needed to divide by sqrt(2) in the scaling here to get it to match with the 2D and regular hex 3D method.
            p = encode_point_n(x=x * transform_mat[3][0] / np.sqrt(2),
                               y=y * transform_mat[3][0] / np.sqrt(2),
                               axis_sps=axis_sps,
                               x_axis=x_axis,
                               y_axis=y_axis)
            vectors[i, j, :] = p.v

    # also return the axis_sps so individual points can be generated
    return vectors, axis_sps
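The least-squares step solves points_2d @ T = points_nd for a 2-by-N transform T whose rows, rescaled by the singular values, become the x and y exponent vectors. A standalone check of the projection (numpy only):

import numpy as np

N = 3
points_nd = np.eye(N) * np.sqrt(N)
thetas = np.linspace(0, 2 * np.pi, N + 1)[:-1]
points_2d = np.stack([np.cos(thetas), np.sin(thetas)], axis=1)
T, _, _, sv = np.linalg.lstsq(points_2d, points_nd, rcond=None)
print(T.shape, sv)  # (2, 3) transform and its two singular values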
Example #12
def random_unitary(n_samples=1000, dim=3, version=2):
    points = np.zeros((n_samples, dim))

    for i in range(n_samples):
        if version == 1:
            sp = nengo_spa.SemanticPointer(data=np.random.randn(dim))
            sp = sp.normalized()
            sp = sp.unitary()
        elif version == 0:
            sp = spa.SemanticPointer(dim)
            sp.make_unitary()
        elif version == 2:
            sp = make_good_unitary(dim=dim)
        else:
            raise NotImplementedError

        points[i, :] = sp.v
    return points
Example #13
def get_nd_encoding_func(n, dim, seed=13):

    rng = np.random.RandomState(seed=seed)

    axis_vectors = []
    for i in range(n):
        axis_vectors.append(make_good_unitary(dim, rng=rng))

    def encoding_func(features):
        """
        Take in 'n' features as a numpy array, and output a 'dim' dimensional SSP
        """

        vec = power(axis_vectors[0], features[0])
        for i in range(1, n):
            vec *= power(axis_vectors[i], features[i])

        return vec.v

    return encoding_func
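Hedged usage sketch for the n-dimensional encoder (each feature binds its own random axis vector through a fractional power):

enc = get_nd_encoding_func(n=4, dim=512, seed=13)
vec = enc(np.array([0.1, -0.3, 2.0, 1.5]))  # (512,) SSP for the 4 features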
Example #14
def get_axes_and_scale(dim=256, n=3, seed=13, apply_scaling=True):
    """
    Get X and Y axis vectors based on an n dimensional projection.
    Also return the correct scaling so the size of the bump in the heatmap is consistent
    """
    rng = np.random.RandomState(seed=seed)

    points_nd = np.eye(n) * np.sqrt(n)
    # points in 2D that will correspond to each axis, plus one at zero
    points_2d = np.zeros((n, 2))
    thetas = np.linspace(0, 2 * np.pi, n + 1)[:-1]
    # TODO: will want a scaling here, or along the high dim axes
    for i, theta in enumerate(thetas):
        points_2d[i, 0] = np.cos(theta)
        points_2d[i, 1] = np.sin(theta)

    transform_mat = np.linalg.lstsq(points_2d, points_nd, rcond=None)

    x_axis = transform_mat[0][0, :]
    y_axis = transform_mat[0][1, :]

    if apply_scaling:
        x_axis /= transform_mat[3][0]
        y_axis /= transform_mat[3][1]

    axis_sps = []
    for i in range(n):
        # random unitary vector
        axis_sps.append(make_good_unitary(dim, rng=rng))

    X = power(axis_sps[0], x_axis[0])
    Y = power(axis_sps[0], y_axis[0])
    for i in range(1, n):
        X *= power(axis_sps[i], x_axis[i])
        Y *= power(axis_sps[i], y_axis[i])

    return X, Y, transform_mat[3][0]
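Hedged usage sketch: when apply_scaling is False, the returned singular value can instead be folded into the coordinates by hand (power and SemanticPointer multiplication, i.e. circular convolution, behave as in the library's other examples):

X, Y, scale = get_axes_and_scale(dim=256, n=3, seed=13, apply_scaling=False)
ssp = power(X, 2.0 / scale) * power(Y, 1.0 / scale)  # point (2, 1), rescaled manually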
Example #15
version = 2
n_samples = 25000  #10000
dim = 5  #3

points = np.zeros((n_samples, dim))

for i in range(n_samples):
    if version == 1:
        sp = nengo_spa.SemanticPointer(data=np.random.randn(dim))
        sp = sp.normalized()
        sp = sp.unitary()
    elif version == 0:
        sp = spa.SemanticPointer(dim)
        sp.make_unitary()
    elif version == 2:
        sp = make_good_unitary(dim=dim)

    points[i, :] = sp.v

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')


def orthogonal_dir_unitary(dim=5, phi=np.pi / 2.):
    xf = np.zeros((dim, ), dtype=np.complex128)
    xf[0] = 1
    xf[1] = np.exp(1.j * phi)
    xf[2] = 1
    xf[3] = 1
    xf[4] = np.exp(-1.j * phi)

    # the snippet was truncated here; returning the real inverse FFT is the
    # natural completion, since the spectrum above is conjugate-symmetric
    return np.fft.ifft(xf).real
Example #16
        else:
            raise NotImplementedError

        points[i, :] = sp.v
    return points


# unitaries = [
#     unitary(np.pi/3., +1),
#     unitary(-np.pi/8., +1),
#     unitary(np.pi/3., -1),
#     unitary(np.pi/8., -1),
# ]

unitaries = [
    make_good_unitary(dim=5, rng=np.random.RandomState(seed=13)).v,
    make_good_unitary(dim=11, rng=np.random.RandomState(seed=13)).v,
    make_good_unitary(dim=32, rng=np.random.RandomState(seed=14)).v,
    make_good_unitary(dim=128, rng=np.random.RandomState(seed=13)).v,
]

titles = [
    'Dim = 5',
    'Dim = 11',
    'Dim = 32',
    'Dim = 128',
]

palette = sns.color_palette("hls", len(unitaries))

# for shading the space
Example #17
def main():

    parser = argparse.ArgumentParser(
        'Traverse many graphs with an SSP algorithm and report metrics')

    parser.add_argument('--n-samples',
                        type=int,
                        default=5,
                        help='Number of different graphs to test')
    parser.add_argument('--seed',
                        type=int,
                        default=13,
                        help='Seed for training and generating axis SSPs')
    parser.add_argument('--dim',
                        type=int,
                        default=512,
                        help='Dimensionality of the SSPs')
    parser.add_argument('--res',
                        type=int,
                        default=128,
                        help='Resolution of the linspaces used')
    parser.add_argument('--normalize',
                        type=int,
                        default=1,
                        choices=[0, 1],
                        help='Whether or not to normalize SPs')
    parser.add_argument(
        '--diameter-increment',
        type=float,
        default=1.0,
        help='How much to expand ellipse diameter by on each step')

    args = parser.parse_args()

    # Convert to boolean
    args.normalize = args.normalize == 1

    # Metrics
    # set to 1 if the found path is the shortest
    shortest_path = np.zeros((args.n_samples))

    # set to 1 if the path found is valid (only uses connections that exist)
    valid_path = np.zeros((args.n_samples))

    # set to 1 if any path is found in the time allotted
    found_path = np.zeros((args.n_samples))

    np.random.seed(args.seed)

    xs = np.linspace(0, 10, args.res)
    ys = np.linspace(0, 10, args.res)

    x_axis_sp = make_good_unitary(args.dim)
    y_axis_sp = make_good_unitary(args.dim)

    heatmap_vectors = get_heatmap_vectors(xs, ys, x_axis_sp, y_axis_sp)

    # TEMP: putting this outside the loop for debugging
    graph_params = generate_graph(dim=args.dim,
                                  x_axis_sp=x_axis_sp,
                                  y_axis_sp=y_axis_sp)

    for n in range(args.n_samples):

        print("Sample {} of {}".format(n + 1, args.n_samples))

        # graph_params = generate_graph(dim=args.dim, x_axis_sp=x_axis_sp, y_axis_sp=y_axis_sp)

        elliptic_expansion = EllipticExpansion(
            x_axis_sp=x_axis_sp,
            y_axis_sp=y_axis_sp,
            xs=xs,
            ys=ys,
            heatmap_vectors=heatmap_vectors,
            diameter_increment=args.diameter_increment,
            normalize=args.normalize,
            debug_mode=True,
            **graph_params)

        path = elliptic_expansion.find_path(
            max_steps=10,  # alternatives tried: 15, 20
            display=False,
            graph=graph_params['graph'],
            xs=xs,
            ys=ys,
            heatmap_vectors=heatmap_vectors)

        optimal_path = graph_params['graph'].search_graph(
            start_node=graph_params['start_landmark_id'],
            end_node=graph_params['end_landmark_id'],
        )

        print("found path is: {}".format(path))
        print("optimal path is: {}".format(optimal_path))

        if path is not None:
            found_path[n] = 1

            if graph_params['graph'].is_valid_path(path):
                valid_path[n] = 1
                print("path is valid")
            else:
                print("path is invalid")

            if path == optimal_path:
                shortest_path[n] = 1
                print("path is optimal")
            else:
                print("path is not optimal")

    print("Found path: {}".format(found_path.mean()))
    print("Valid path: {}".format(valid_path.mean()))
    print("Shortest path: {}".format(shortest_path.mean()))
Example #18
xs = np.linspace(-args.limit, args.limit, args.res)
ys = np.linspace(-args.limit, args.limit, args.res)

# if the data already exists, just load it
if os.path.exists(fname):
    data = np.load(fname)
    square_heatmaps = data['square_heatmaps']
    hex_heatmaps = data['hex_heatmaps']
else:

    for seed in range(args.n_seeds):
        print("\x1b[2K\r Seed {} of {}".format(seed + 1, args.n_seeds),
              end="\r")
        rng = np.random.RandomState(seed=seed)
        X = make_good_unitary(args.dim, rng=rng)
        Y = make_good_unitary(args.dim, rng=rng)
        Z = make_good_unitary(args.dim, rng=rng)

        axes[seed, 0, :] = X.v
        axes[seed, 1, :] = Y.v
        axes[seed, 2, :] = Z.v

        square_heatmaps[seed, :, :] = np.tensordot(origin_vec,
                                                   get_heatmap_vectors(
                                                       xs=xs,
                                                       ys=ys,
                                                       x_axis_sp=X,
                                                       y_axis_sp=Y),
                                                   axes=([0], [2]))
Example #19
# data = np.load(fname)
# square_heatmaps = data['square_heatmaps']
# hex_heatmaps = data['hex_heatmaps']
#
# avg_square_heatmap = square_heatmaps.mean(axis=0)
# avg_hex_heatmap = hex_heatmaps.mean(axis=0)

dim = 512
res = 256
limit = 5
xs = np.linspace(-limit, limit, res)

Xh, Yh = get_axes(dim=dim, seed=13)

rng = np.random.RandomState(seed=13)
X = make_good_unitary(dim=dim, rng=rng)
Y = make_good_unitary(dim=dim, rng=rng)
Z = make_good_unitary(dim=dim, rng=rng)

fig, ax = plt.subplots(1, 3, figsize=(8, 4))

sigma_normal = 0.5
sigma_hex = 0.5
sigma_hex_c = 0.5

sim_hex = np.zeros((res, ))
sim_hex_c = np.zeros((res, ))  # this version has axes generated together and then converted to 2D
sim_normal = np.zeros((res, ))
gauss_hex = gaussian_1d(0, sigma_hex, xs)
gauss_hex_c = gaussian_1d(0, sigma_hex_c, xs)
gauss_normal = gaussian_1d(0, sigma_normal, xs)
Example #20
import nengo.spa as spa
import numpy as np
from spatial_semantic_pointers.utils import encode_point, make_good_unitary, get_heatmap_vectors
from spatial_semantic_pointers.plots import SpatialHeatmap
from spatial_semantic_pointers.networks.ssp_cleanup import SpatialCleanup

seed = 13
dim = 512
limit = 5
res = 256

model = spa.SPA(seed=seed)

rstate = np.random.RandomState(seed=13)

x_axis_sp = make_good_unitary(dim, rng=rstate)
y_axis_sp = make_good_unitary(dim, rng=rstate)

xs = np.linspace(-limit, limit, res)
ys = np.linspace(-limit, limit, res)

n_cconv_neurons = 50  #10

#ssp_cleanup_path = '/home/bjkomer/metric-representation/metric_representation/pytorch/ssp_cleanup_cosine_15_items/May07_15-21-15/model.pt'
ssp_cleanup_path = '/home/ctnuser/metric-representation/metric_representation/pytorch/ssp_cleanup_cosine_15_items/May07_15-21-15/model.pt'


def angle_to_ssp(x):

    return encode_point(np.cos(x), np.sin(x), x_axis_sp, y_axis_sp).v
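angle_to_ssp places a heading angle on the unit circle in SSP space; sampling it around the circle gives a ring of encoded points (illustrative sketch):

angles = np.linspace(0, 2 * np.pi, 8, endpoint=False)
ring = np.array([angle_to_ssp(a) for a in angles])  # (8, 512) ring of SSPs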
Example #21
    for i, x in enumerate(xs):
        for j, y in enumerate(ys):
            sim[i, j] = np.dot(
                point,
                np.fft.ifft(np.fft.fft(X)**x * np.fft.fft(Y)**y).real)

    return sim


# X = unitary(np.pi/2., 0)
# Y = unitary(0, np.pi/2.)

rng = np.random.RandomState(seed=13)
dim = 128
X = make_good_unitary(dim=dim, rng=rng).v
Y = make_good_unitary(dim=dim, rng=rng).v

# dim = 32
# X = orthogonal_unitary(dim=dim, index=dim//4, phi=np.pi/2.)
# Y = orthogonal_unitary(dim=dim, index=dim//4+1, phi=np.pi/2.)

# pos = [.5, .25]
pos = [2, 1]

# representation of the point as an SSP
point = np.fft.ifft(np.fft.fft(X)**pos[0] * np.fft.fft(Y)**pos[1]).real

limit = 5
xs = np.linspace(-limit, limit, 64)
ys = np.linspace(-limit, limit, 64)
Example #22
# np.random.seed(13)
# np.random.seed(17)
np.random.seed(42)

dim = 512
# dim = 1024

res = 128
xs = np.linspace(0, 10, res)
ys = np.linspace(0, 10, res)

# These will include the space that is the difference between any two nodes
xs_larger = np.linspace(-10, 10, res)
ys_larger = np.linspace(-10, 10, res)

x_axis_sp = make_good_unitary(dim)
y_axis_sp = make_good_unitary(dim)

heatmap_vectors = get_heatmap_vectors(xs, ys, x_axis_sp, y_axis_sp)
heatmap_vectors_larger = get_heatmap_vectors(xs_larger, ys_larger, x_axis_sp,
                                             y_axis_sp)

# Map
map_sp = spa.SemanticPointer(data=np.zeros((dim, )))
# version of the map with landmark IDs bound to each location
landmark_map_sp = spa.SemanticPointer(data=np.zeros((dim, )))

# Connectivity
# contains each connection egocentrically
con_ego_sp = spa.SemanticPointer(data=np.zeros((dim, )))
# contains each connection allocentrically
Example #23
    if args.dim == 256:
        ssp_cleanup_path = '/home/ctnuser/ssp-navigation/ssp_navigation/trained_models/ssp_cleanup/hex-ssp/d256/May23_13-54-52/model.pt'
    elif args.dim == 512:
        ssp_cleanup_path = '/home/ctnuser/ssp-navigation/ssp_navigation/trained_models/ssp_cleanup/hex-ssp/d512/May23_14-48-23/model.pt'
    elif args.dim == 1024:
        ssp_cleanup_path = '/home/ctnuser/ssp-navigation/ssp_navigation/trained_models/ssp_cleanup/hex-ssp/d1024/May23_16-11-12/model.pt'
else:
    ssp_cleanup_path = '/home/ctnuser/metric-representation/metric_representation/pytorch/ssp_cleanup_cosine_15_items/May07_15-21-15/model.pt'
    seed = 13
    dim = 512
    limit = 5
    res = 256

    rstate = np.random.RandomState(seed=13)

    X = make_good_unitary(dim, rng=rstate)
    Y = make_good_unitary(dim, rng=rstate)

    xs = np.linspace(-limit, limit, res)
    ys = np.linspace(-limit, limit, res)

pytorch_cleanup = True

if not pytorch_cleanup:
    # generate a dataset for the cleanup function
    cache_fname = 'cleanup_dataset_{}.npz'.format(dim)
    if os.path.exists(cache_fname):
        data = np.load(cache_fname)
        clean_vectors = data['clean_vectors']
        noisy_vectors = data['noisy_vectors']
    else:
Example #24
if __name__ == '__main__':
    args = parser.parse_args()
else:
    args = parser.parse_args([])

neurons_per_dim = 5
n_neurons = args.dim * neurons_per_dim

xs = np.linspace(-args.limit, args.limit, args.res)
ys = np.linspace(-args.limit, args.limit, args.res)

# X, Y = get_axes(dim=dim, n=3, seed=13, period=0, optimal_phi=False)

# temporarily using these to match the axis vectors the cleanup was trained on
rstate = np.random.RandomState(seed=13)
X = make_good_unitary(args.dim, rng=rstate)
Y = make_good_unitary(args.dim, rng=rstate)


def angle_to_ssp(x):

    return encode_point(np.cos(x), np.sin(x), X, Y).v


heatmap_vectors = get_heatmap_vectors(xs, ys, X, Y)

rng = np.random.RandomState(seed=13)
preferred_locations = hilbert_2d(-args.limit,
                                 args.limit,
                                 n_neurons,
                                 rng,
Example #25
neurons_per_dim = 5
n_neurons = dim * neurons_per_dim
n_cconv_neurons = neurons_per_dim * 2

# preferred_locations = hilbert_2d(pc_limit_low, pc_limit_high, n_neurons, rng, p=8, N=2, normal_std=3)
preferred_locations = hilbert_2d(pc_limit_low,
                                 pc_limit_high,
                                 n_neurons,
                                 rng,
                                 p=10,
                                 N=2,
                                 normal_std=3)

# item_sp = nengo.spa.SemanticPointer(dim)
item_sp = make_good_unitary(dim)
# item_sp = encode_point(1, 1, X, Y)


def input_func(t):
    index = int(np.floor(t / dt))
    pos = positions[index % (n_samples - 1)]
    pos_ssp = encode_point(pos[0], pos[1], X, Y)
    # item = item_sp

    if diff_axis:
        bound = encode_point(pos[0], pos[1], X_new, Y_new)
    else:
        bound = item_sp * pos_ssp

    if index > n_samples - 1:
Example #26
def get_env(seed=13, dim=512):

    rstate = np.random.RandomState(seed=seed)
    x_axis_sp = make_good_unitary(dim=dim, rng=rstate)
    y_axis_sp = make_good_unitary(dim=dim, rng=rstate)

    map_array = np.array([
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
        [1, 0, 1, 0, 0, 1, 0, 0, 0, 1],
        [1, 0, 1, 1, 0, 0, 0, 0, 0, 1],
        [1, 0, 0, 1, 0, 0, 1, 1, 0, 1],
        [1, 0, 0, 0, 0, 0, 0, 1, 0, 1],
        [1, 0, 0, 0, 1, 1, 0, 0, 0, 1],
        [1, 0, 1, 0, 1, 0, 0, 0, 0, 1],
        [1, 1, 1, 0, 0, 0, 1, 1, 0, 1],
        [1, 1, 0, 0, 0, 0, 1, 0, 0, 1],
        [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    ])

    # Parameters to define the environment to use
    params = {
        'x_axis_vec': x_axis_sp,
        'y_axis_vec': y_axis_sp,
        'full_map_obs': False,
        'pob': 0,
        'max_sensor_dist': 10,
        'n_sensors': 36,
        'fov': 360,
        'normalize_dist_sensors': True,
        'n_grid_cells': 0,
        'bc_n_ring': 0,
        'bc_n_rad': 0,
        'bc_dist_rad': 0,
        'bc_receptive_field_min': 0,
        'bc_receptive_field_max': 0,
        'hd_n_cells': 0,
        'hd_receptive_field_min': 0,
        'hd_receptive_field_max': 0,
        'heading': 'circular',
        'location': 'none',
        'goal_loc': 'none',
        'goal_vec': 'none',
        'goal_csp': True,
        'agent_csp': True,
        'goal_csp_egocentric': True,
        'csp_dim': dim,
    }

    obs_dict = generate_obs_dict(params)

    csp_offset = map_array.shape[0] / 2
    csp_scaling = 5 / (map_array.shape[0] / 2)

    env = GridWorldEnv(
        map_array=map_array,
        # object_locations=object_locations,
        observations=obs_dict,
        continuous=True,
        movement_type='holonomic',
        dt=0.1,
        max_steps=1000,
        fixed_episode_length=False,
        max_lin_vel=5,
        max_ang_vel=5,
        screen_width=300,
        screen_height=300,
        csp_scaling=csp_scaling,
        csp_offset=csp_offset,
    )
    return env
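A hedged usage sketch, assuming GridWorldEnv exposes the usual gym-style reset/step interface (not shown in this snippet):

env = get_env(seed=13, dim=512)
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())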
Example #27
def final():
    fig, ax = plt.subplots(5, 4, figsize=(8, 6), tight_layout=True)
    fig2, ax2 = plt.subplots(5, 3, figsize=(5, 4), tight_layout=True)

    dim = 256
    res = 32
    sigma = .15
    n_toroid = (dim - 1) // 2

    for type_index in range(5):
        phi_mag_total = np.zeros((n_toroid*3, 1))
        for seed in range(3):
            if type_index == 0:
                # random SSP
                rng = np.random.RandomState(seed=seed)
                X = make_good_unitary(dim, rng=rng)
                Y = make_good_unitary(dim, rng=rng)

                md, phi_pos = phi_mag_and_dir(X.v, Y.v)
            elif type_index == 1:
                # sub-toroid SSP
                rng = np.random.RandomState(seed=seed)
                X, Y = get_fixed_dim_sub_toriod_axes(
                    dim=dim,
                    n_proj=3,
                    scale_ratio=0,
                    scale_start_index=0,
                    rng=rng,
                    eps=0.001,
                )

                md, phi_pos = phi_mag_and_dir(X.v, Y.v)
            elif type_index == 2:
                # learned SSP no regularization
                X, Y = get_axes_from_network(
                    fname='learned_ssp_models/no_reg_model_1layer_1024hs_seed{}.pt'.format(seed),
                    # fname='learned_ssp_models/reg_proper_model_1layer_1024hs_seed{}.pt'.format(seed),
                    # fname='learned_ssp_models/reg_proper_noise_model_1layer_1024hs_seed{}.pt'.format(seed),
                    # fname='learned_ssp_models/reg_proper_phi_decay_model_1layer_1024hs_seed{}.pt'.format(seed),
                    dim=dim
                )
                md, phi_pos = phi_mag_and_dir(X, Y)
            elif type_index == 3:
                # learned SSP just weight regularization
                X, Y = get_axes_from_network(
                    fname='learned_ssp_models/reg_proper_model_1layer_1024hs_seed{}.pt'.format(seed),
                    dim=dim
                )
                md, phi_pos = phi_mag_and_dir(X, Y)
            elif type_index == 4:
                # learned SSP weight and phi regularization
                X, Y = get_axes_from_network(
                    fname='learned_ssp_models/reg_proper_phi_decay_model_1layer_1024hs_seed{}.pt'.format(seed),
                    dim=dim
                )
                md, phi_pos = phi_mag_and_dir(X, Y)

            gauss_image(phi_pos, ax[type_index, seed], sigma=sigma, res=res)
            if type_index < 2:
                X = X.v
                Y = Y.v
            im = plot_heatmap(X, Y, np.linspace(-5, 5, 128), np.linspace(-5, 5, 128), ax2[type_index, seed])
            phi_mag_total[seed * n_toroid:(seed + 1) * n_toroid, 0] = md[:, 0]

            if seed == 0:
                if type_index == 0:
                    # label = 'Fixed SSP'
                    label = 'A     '
                elif type_index == 1:
                    # label = 'Fixed Grid SSP'
                    label = 'B     '
                elif type_index == 2:
                    # label = 'Learned SSP'
                    label = 'C     '
                elif type_index == 3:
                    # label = 'Learned SSP'
                    label = 'D     '
                elif type_index == 4:
                    # label = 'Learned SSP'
                    label = 'E     '
                # ax[type_index, seed].set_ylabel(
                #     label, rotation=90, fontsize=18,
                #     # position=(0, .4)
                # )
                ax[type_index, seed].set_ylabel(
                    label, rotation=0, fontsize=18,
                    position=(0, .4)
                )
                ax2[type_index, seed].set_ylabel(
                    label, rotation=0, fontsize=18,
                    position=(0, .4)
                )

        # histogram at the bottom
        magnitude_histogram(phi_mag_total, ax[type_index, 3])

        # if type_index == 0:
        #
        #     ax[type_index, 1].set_title('Phi Magnitude and Direction')
        #
        #     ax[type_index, 3].set_title('Phi Magnitude Histogram')

    # cbar_ax = fig2.add_axes([0.85, 0.05, 0.05, 0.85])
    # add_axes([left, bottom, width, height])
    cbar_ax = fig2.add_axes([0.85, 0.05, 0.05, 0.90])
    fig2.colorbar(im, cax=cbar_ax)

    plt.show()
Example #28
# NOTE: this snippet's opening lines were cut off; the function is
# reconstructed here, with the signature inferred from the call to
# circulant_matrix_to_vec further below.
def circulant_matrix_to_vec(mat):
    # average the circulant diagonals of mat back into a single vector:
    # scipy.linalg.circulant(v)[i, j] == v[(i - j) % dim]
    dim = mat.shape[0]
    vec = np.zeros((dim, ))
    for i in range(dim):
        for j in range(dim):
            vec[(i - j) % dim] += mat[i, j]

    vec /= dim

    return vec


rng = np.random.RandomState(seed=args.seed)

xs = np.linspace(args.limit_low, args.limit_high, args.res)

axis_vector_type = 'pca'

if axis_vector_type == 'standard':
    dim = 256
    X = make_good_unitary(dim, rng=rng)
    # X = spa.SemanticPointer(dim)
    # X.make_unitary()

    X_circ = circulant(X.v)
    X_vec = circulant_matrix_to_vec(X_circ)

    # assert (np.all(X_vec == X.v))
elif axis_vector_type == 'covariance':
    # generate SSP based on a given circulant matrix
    data = np.load(args.fname)
    X_circ = data['covariance']
    dim = X_circ.shape[0]
    X_vec = circulant_matrix_to_vec(X_circ)
    X = spa.SemanticPointer(data=X_vec)
Example #29
                         traj_folder_name)

if not os.path.exists(traj_path):
    os.makedirs(traj_path)

traj_name = os.path.join(traj_path, 'trajectory_dataset.npz')

limit_low = 0
limit_high = args.maze_size

if not os.path.exists(base_name):
    print("Generating base maze data")

    rng = np.random.RandomState(seed=args.seed)

    x_axis_sp = make_good_unitary(dim=args.dim, rng=rng)
    y_axis_sp = make_good_unitary(dim=args.dim, rng=rng)

    np.random.seed(args.seed)

    xs_coarse = np.linspace(limit_low, limit_high, args.maze_size)
    ys_coarse = np.linspace(limit_low, limit_high, args.maze_size)

    xs = np.linspace(limit_low, limit_high, args.res)
    ys = np.linspace(limit_low, limit_high, args.res)

    coarse_mazes = np.zeros((args.n_mazes, args.maze_size, args.maze_size))
    fine_mazes = np.zeros((args.n_mazes, args.res, args.res))
    solved_mazes = np.zeros(
        (args.n_mazes, args.n_goals, args.res, args.res, 2))
    maze_sps = np.zeros((args.n_mazes, args.dim))
Example #30
def main():
    parser = argparse.ArgumentParser(
        'Train a network to clean up a noisy spatial semantic pointer')

    parser.add_argument('--loss',
                        type=str,
                        default='cosine',
                        choices=['cosine', 'mse'])
    parser.add_argument('--noise-type',
                        type=str,
                        default='memory',
                        choices=['memory', 'gaussian', 'both'])
    parser.add_argument(
        '--sigma',
        type=float,
        default=1.0,
        help='sigma on the gaussian noise if noise-type==gaussian')
    parser.add_argument('--train-fraction',
                        type=float,
                        default=.8,
                        help='proportion of the dataset to use for training')
    parser.add_argument(
        '--n-samples',
        type=int,
        default=10000,
        help=
        'Number of memories to generate. Total samples will be n-samples * n-items'
    )
    parser.add_argument('--n-items',
                        type=int,
                        default=12,
                        help='number of items in memory. Proxy for noisiness')
    parser.add_argument('--dim',
                        type=int,
                        default=512,
                        help='Dimensionality of the semantic pointers')
    parser.add_argument('--hidden-size',
                        type=int,
                        default=512,
                        help='Hidden size of the cleanup network')
    parser.add_argument('--limits',
                        type=str,
                        default="-5,5,-5,5",
                        help='The limits of the space')
    parser.add_argument('--epochs', type=int, default=50)
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--seed', type=int, default=13)
    parser.add_argument('--logdir',
                        type=str,
                        default='ssp_cleanup',
                        help='Directory for saved model and tensorboard log')
    parser.add_argument('--load-model',
                        type=str,
                        default='',
                        help='Optional model to continue training from')
    parser.add_argument(
        '--name',
        type=str,
        default='',
        help=
        'Name of output folder within logdir. Will use current date and time if blank'
    )
    parser.add_argument('--weight-histogram',
                        action='store_true',
                        help='Save histograms of the weights if set')
    parser.add_argument('--use-hex-ssp', action='store_true')
    parser.add_argument('--optimizer',
                        type=str,
                        default='adam',
                        choices=['sgd', 'adam', 'rmsprop'])

    args = parser.parse_args()

    args.limits = tuple(float(v) for v in args.limits.split(','))

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    dataset_name = 'data/ssp_cleanup_dataset_dim{}_seed{}_items{}_limit{}_samples{}.npz'.format(
        args.dim, args.seed, args.n_items, args.limits[1], args.n_samples)

    final_test_samples = 100
    final_test_items = 15
    final_test_dataset_name = 'data/ssp_cleanup_test_dataset_dim{}_seed{}_items{}_limit{}_samples{}.npz'.format(
        args.dim, args.seed, final_test_items, args.limits[1],
        final_test_samples)

    if not os.path.exists('data'):
        os.makedirs('data')

    rng = np.random.RandomState(seed=args.seed)
    if args.use_hex_ssp:
        x_axis_sp, y_axis_sp = get_axes(dim=args.dim, n=3, seed=args.seed)
    else:
        x_axis_sp = make_good_unitary(args.dim, rng=rng)
        y_axis_sp = make_good_unitary(args.dim, rng=rng)

    if args.noise_type == 'gaussian':
        # Simple generation
        clean_ssps = np.zeros((args.n_samples, args.dim))
        coords = np.zeros((args.n_samples, 2))
        for i in range(args.n_samples):
            x = np.random.uniform(low=args.limits[0], high=args.limits[1])
            y = np.random.uniform(low=args.limits[2], high=args.limits[3])

            clean_ssps[i, :] = encode_point(x,
                                            y,
                                            x_axis_sp=x_axis_sp,
                                            y_axis_sp=y_axis_sp).v
            coords[i, 0] = x
            coords[i, 1] = y
        # Gaussian noise will be added later
        noisy_ssps = clean_ssps.copy()
    else:

        if os.path.exists(dataset_name):
            print("Loading dataset")
            data = np.load(dataset_name)
            clean_ssps = data['clean_ssps']
            noisy_ssps = data['noisy_ssps']
        else:
            print("Generating SSP cleanup dataset")
            clean_ssps, noisy_ssps, coords = generate_cleanup_dataset(
                x_axis_sp=x_axis_sp,
                y_axis_sp=y_axis_sp,
                n_samples=args.n_samples,
                dim=args.dim,
                n_items=args.n_items,
                limits=args.limits,
                seed=args.seed,
            )
            print("Dataset generation complete. Saving dataset")
            np.savez(
                dataset_name,
                clean_ssps=clean_ssps,
                noisy_ssps=noisy_ssps,
                coords=coords,
                x_axis_vec=x_axis_sp.v,
                y_axis_vec=y_axis_sp.v,
            )

    # check if the final test set has been generated yet
    if os.path.exists(final_test_dataset_name):
        print("Loading final test dataset")
        final_test_data = np.load(final_test_dataset_name)
        final_test_clean_ssps = final_test_data['clean_ssps']
        final_test_noisy_ssps = final_test_data['noisy_ssps']
    else:
        print("Generating final test dataset")
        final_test_clean_ssps, final_test_noisy_ssps, final_test_coords = generate_cleanup_dataset(
            x_axis_sp=x_axis_sp,
            y_axis_sp=y_axis_sp,
            n_samples=final_test_samples,
            dim=args.dim,
            n_items=final_test_items,
            limits=args.limits,
            seed=args.seed,
        )
        print("Final test generation complete. Saving dataset")
        np.savez(
            final_test_dataset_name,
            clean_ssps=final_test_clean_ssps,
            noisy_ssps=final_test_noisy_ssps,
            coords=final_test_coords,
            x_axis_vec=x_axis_sp.v,
            y_axis_vec=y_axis_sp.v,
        )

    # Add gaussian noise if required
    if args.noise_type == 'gaussian' or args.noise_type == 'both':
        noisy_ssps += np.random.normal(loc=0,
                                       scale=args.sigma,
                                       size=noisy_ssps.shape)

    n_samples = clean_ssps.shape[0]
    n_train = int(args.train_fraction * n_samples)
    n_test = n_samples - n_train
    assert (n_train > 0 and n_test > 0)
    train_clean = clean_ssps[:n_train, :]
    train_noisy = noisy_ssps[:n_train, :]
    test_clean = clean_ssps[n_train:, :]
    test_noisy = noisy_ssps[n_train:, :]

    # NOTE: this dataset is actually generic and can take any input/output mapping
    dataset_train = CoordDecodeDataset(vectors=train_noisy, coords=train_clean)
    dataset_test = CoordDecodeDataset(vectors=test_noisy, coords=test_clean)
    dataset_final_test = CoordDecodeDataset(vectors=final_test_noisy_ssps,
                                            coords=final_test_clean_ssps)

    trainloader = torch.utils.data.DataLoader(
        dataset_train,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=0,
    )

    # For testing just do everything in one giant batch
    testloader = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=len(dataset_test),
        shuffle=False,
        num_workers=0,
    )

    final_testloader = torch.utils.data.DataLoader(
        dataset_final_test,
        batch_size=len(dataset_final_test),
        shuffle=False,
        num_workers=0,
    )

    model = FeedForward(dim=dataset_train.dim,
                        hidden_size=args.hidden_size,
                        output_size=dataset_train.dim)

    # Open a tensorboard writer if a logging directory is given
    if args.logdir != '':
        current_time = datetime.now().strftime('%b%d_%H-%M-%S')
        save_dir = osp.join(args.logdir, current_time)
        writer = SummaryWriter(log_dir=save_dir)
        if args.weight_histogram:
            # Log the initial parameters
            for name, param in model.named_parameters():
                writer.add_histogram('parameters/' + name,
                                     param.clone().cpu().data.numpy(), 0)

    mse_criterion = nn.MSELoss()
    cosine_criterion = nn.CosineEmbeddingLoss()

    if args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=args.momentum)
    elif args.optimizer == 'rmsprop':
        optimizer = torch.optim.RMSprop(model.parameters(),
                                        lr=args.lr,
                                        momentum=args.momentum)
    elif args.optimizer == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    else:
        raise NotImplementedError

    for e in range(args.epochs):
        print('Epoch: {0}'.format(e + 1))

        avg_mse_loss = 0
        avg_cosine_loss = 0
        n_batches = 0
        for i, data in enumerate(trainloader):

            noisy, clean = data

            if noisy.size()[0] != args.batch_size:
                continue  # Drop data, not enough for a batch
            optimizer.zero_grad()

            outputs = model(noisy)

            mse_loss = mse_criterion(outputs, clean)
            # Modified to use CosineEmbeddingLoss
            cosine_loss = cosine_criterion(outputs, clean,
                                           torch.ones(args.batch_size))

            avg_cosine_loss += cosine_loss.data.item()
            avg_mse_loss += mse_loss.data.item()
            n_batches += 1

            if args.loss == 'cosine':
                cosine_loss.backward()
            else:
                mse_loss.backward()

            # print(loss.data.item())

            optimizer.step()

        print(avg_cosine_loss / n_batches)

        if args.logdir != '':
            if n_batches > 0:
                avg_cosine_loss /= n_batches
                writer.add_scalar('avg_cosine_loss', avg_cosine_loss, e + 1)
                writer.add_scalar('avg_mse_loss', avg_mse_loss, e + 1)

            if args.weight_histogram and (e + 1) % 10 == 0:
                for name, param in model.named_parameters():
                    writer.add_histogram('parameters/' + name,
                                         param.clone().cpu().data.numpy(),
                                         e + 1)

    print("Testing")
    with torch.no_grad():

        for label, loader in zip(['test', 'final_test'],
                                 [testloader, final_testloader]):

            # Everything is in one batch, so this loop will only happen once
            for i, data in enumerate(loader):

                noisy, clean = data

                outputs = model(noisy)

                mse_loss = mse_criterion(outputs, clean)
                # Modified to use CosineEmbeddingLoss
                cosine_loss = cosine_criterion(outputs, clean,
                                               torch.ones(noisy.size(0)))

                print(cosine_loss.data.item())

            if args.logdir != '':
                # TODO: get a visualization of the performance

                # show plots of the noisy, clean, and cleaned up with the network
                # note that the plotting mechanism itself uses nearest neighbors, so has a form of cleanup built in

                xs = np.linspace(args.limits[0], args.limits[1], 256)
                ys = np.linspace(args.limits[0], args.limits[1], 256)

                heatmap_vectors = get_heatmap_vectors(xs, ys, x_axis_sp,
                                                      y_axis_sp)

                noisy_coord = ssp_to_loc_v(noisy, heatmap_vectors, xs, ys)

                pred_coord = ssp_to_loc_v(outputs, heatmap_vectors, xs, ys)

                clean_coord = ssp_to_loc_v(clean, heatmap_vectors, xs, ys)

                fig_noisy_coord, ax_noisy_coord = plt.subplots()
                fig_pred_coord, ax_pred_coord = plt.subplots()
                fig_clean_coord, ax_clean_coord = plt.subplots()

                plot_predictions_v(noisy_coord,
                                   clean_coord,
                                   ax_noisy_coord,
                                   min_val=args.limits[0],
                                   max_val=args.limits[1],
                                   fixed_axes=True)

                plot_predictions_v(pred_coord,
                                   clean_coord,
                                   ax_pred_coord,
                                   min_val=args.limits[0],
                                   max_val=args.limits[1],
                                   fixed_axes=True)

                plot_predictions_v(clean_coord,
                                   clean_coord,
                                   ax_clean_coord,
                                   min_val=args.limits[0],
                                   max_val=args.limits[1],
                                   fixed_axes=True)

                writer.add_figure('{}/original_noise'.format(label),
                                  fig_noisy_coord)
                writer.add_figure('{}/test_set_cleanup'.format(label),
                                  fig_pred_coord)
                writer.add_figure('{}/ground_truth'.format(label),
                                  fig_clean_coord)
                # fig_hist = plot_histogram(predictions=outputs, coords=coord)
                # writer.add_figure('test set histogram', fig_hist)
                writer.add_scalar('{}/test_cosine_loss'.format(label),
                                  cosine_loss.data.item())
                writer.add_scalar('{}/test_mse_loss'.format(label),
                                  mse_loss.data.item())

    # Close tensorboard writer
    if args.logdir != '':
        writer.close()

        torch.save(model.state_dict(), osp.join(save_dir, 'model.pt'))

        params = vars(args)
        # # Additionally save the axis vectors used
        # params['x_axis_vec'] = list(x_axis_sp.v)
        # params['y_axis_vec'] = list(y_axis_sp.v)
        with open(osp.join(save_dir, "params.json"), "w") as f:
            json.dump(params, f)