Ejemplo n.º 1
0
def _test():
    """Evaluate the trained attractor stack on the MNIST test split.

    Loads the layers saved by ``train()`` from ``LAYERS_PATH``, binarizes
    the test images with the stored train-set mean, propagates each image
    through the layers — snapping to the closest stored attractor at each
    layer — and maps the final winner to a label.

    Returns:
        float: classification accuracy on the test images.
    """
    # allow_pickle=True is required to load an object array of dicts
    # (numpy >= 1.16.3 defaults np.load to allow_pickle=False).
    layers = np.load(LAYERS_PATH, allow_pickle=True)
    # Skip layer 0 (the raw input layer) unless it was trained with attractors.
    first_layer_id = 1 - layers[0]['include_raw_layer']
    l_input = layers[0]
    images, labels = load_mnist.load_images(images_number=IMAGES_NUMBER,
                                            train=False,
                                            digits=l_input['digits'])
    images = np.array([cv2.resize(im, (14, 14)) for im in images],
                      dtype=np.uint8)
    # Binarize with the mean intensity estimated on the *train* images.
    images = (images > l_input['x_mean']).astype(np.int32)
    label_map = layers[-1]['label_map']
    labels_predicted = []
    # Iterate images directly (labels are compared in bulk below); this also
    # gives tqdm a total, which zip() could not provide.
    for im in tqdm(images, desc="Test"):
        y = im.flatten()
        winner = -1
        for layer in layers[first_layer_id:]:
            # Sparse projection, then snap to the max-overlap attractor.
            y = kWTA(np.dot(layer['weights'], y), sparsity=layer['sparsity'])
            l_attractors = layer['attractors']
            overlaps = np.dot(l_attractors, y)
            winner = overlaps.argmax()
            y = l_attractors[winner]
        labels_predicted.append(label_map[winner])
    labels_predicted = np.hstack(labels_predicted)
    accuracy = np.mean(labels == labels_predicted)
    print("Accuracy: {}".format(accuracy))
    return accuracy
Ejemplo n.º 2
0
def learn_pairs(label_interest=5, n_jumps_test=50):
    """Visualize train/test L23 overlap for images of one digit.

    :param label_interest: MNIST label of interest
    :param n_jumps_test: number of test saccades per image; with larger
        `n_jumps_test` the overlap with the L23 train history is expected
        to decay over time, because training observes only the most
        significant features of an image. Ideally the overlap should not
        decay much.
    """
    images, labels = load_mnist.load_images(images_number=100)

    world = World()
    poppy = Agent()

    for image in images[labels == label_interest]:
        world.add_image(image)
        poppy.cortex.reset_activations()
        l23_train = poppy.learn_pairs(world, label_interest)
        world.reset()
        if n_jumps_test == 0:
            l23_test = poppy.learn_pairs(world, label=label_interest)
        else:
            sdr_history = []
            # NOTE(review): sense_data is called once before the loop and
            # again on the first iteration — kept as in the original.
            poppy.sense_data(world)
            for _ in range(n_jumps_test):
                poppy.sense_data(world)
                sdr_history.append(poppy.cortex.V1.layers['L23'].cells.copy())
            l23_test = np.vstack(sdr_history)
        overlap = np.dot(l23_train, l23_test.T)
        # Scale overlaps to 0..255 grayscale for display.
        overlap = (overlap * 255 /
                   poppy.cortex.V1.layers['L23'].n_active).astype(np.uint8)
        cv2.imshow('overlap', overlap)
        cv2_step()
Ejemplo n.º 3
0
def test_translate_display(label_interest=5, display=True):
    """Show SDR overlap under a 1-pixel horizontal shift for one digit class.

    :param label_interest: MNIST label whose images are tested
    :param display: if True, pause for a key press after every image
    """
    images, labels = load_mnist.load_images(images_number=1000)
    cortex = Cortex()
    # Affine matrix shifting the image 1 px to the right.
    shift_right = np.array([[1, 0, 1], [0, 1, 0]], dtype=np.float32)
    overlaps = []
    for image in tqdm(images[labels == label_interest], desc="Translation test"):
        blurred = apply_blur(image)
        result = compute_translation_overlap(cortex, blurred, shift_right,
                                             display=display)
        overlaps.append(result)
        if display:
            cv2_step()
    print("Overlap mean={:.4f} std={:.4f}".format(np.mean(overlaps), np.std(overlaps)))
Ejemplo n.º 4
0
def one_image(label_interest=5):
    """Endlessly saccade over a single image of `label_interest`.

    Picks the first MNIST image with the requested label, enables cortex
    display, and loops forever sensing and associating — intended for
    interactive inspection (stop with Ctrl-C / window close).
    """
    images, labels = load_mnist.load_images(images_number=100)

    world = World()
    poppy = Agent()

    first_match = images[labels == label_interest][0]
    world.add_image(first_match)
    poppy.cortex.reset_activations()
    poppy.cortex.display = True

    while True:  # runs until interrupted
        poppy.sense_data(world)
        poppy.cortex.associate(label=label_interest)
Ejemplo n.º 5
0
def plot_threshold_impact(digits=(0, 1),
                          hidden_size=(2000, ),
                          hidden_sparsity=0.05,
                          include_raw_layer=False):
    """Sweep the attractor-overlap threshold and plot its impact.

    Retrains the network once per threshold value, then plots both the
    number of learned attractors (normalized by the number of train
    images) and the test accuracy against the threshold.

    :param digits: MNIST digits to train and test on
    :param hidden_size: hidden layer sizes; cleared when
        `include_raw_layer` is True (raw layer only)
    :param hidden_sparsity: sparsity of the hidden activations
    :param include_raw_layer: if True, train the raw input layer only
    """
    if include_raw_layer:
        # raw layer only
        hidden_size = []
    # Load the train set only to know how many images the sweep trains on.
    images_unused, _ = load_mnist.load_images(images_number=IMAGES_NUMBER,
                                              train=True,
                                              digits=digits)
    n_images = len(images_unused)
    # Thresholds 1/32, 1/16, ..., 1/2, 1 (inverse powers of two).
    thr_linspace = 1.0 / np.power(2, np.arange(start=5, stop=-1, step=-1))
    n_attractors = []
    accuracies = []
    for thr in thr_linspace:
        train(digits=digits,
              hidden_sizes=hidden_size,
              hidden_sparsity=hidden_sparsity,
              hidden_thr_overlap=thr,
              include_raw_layer=include_raw_layer)
        # allow_pickle=True is required to load an object array of dicts
        # (numpy >= 1.16.3 defaults np.load to allow_pickle=False).
        layers = np.load(LAYERS_PATH, allow_pickle=True)
        n_attractors.append(len(layers[-1]['attractors']))
        accuracy = _test()
        accuracies.append(accuracy)
    n_attractors = np.hstack(n_attractors) / float(n_images)
    plt.plot(thr_linspace,
             n_attractors,
             label='# attractors, normed',
             marker='o')
    plt.plot(thr_linspace, accuracies, label='accuracies', marker='o')
    title = 'Digits {}'.format(tuple(digits))
    if include_raw_layer:
        title += ". Raw input only"
    plt.title(title)
    plt.xlabel('overlap threshold')
    plt.legend()
    plot_path = 'plots/threshold_impact_digits={},include_raw_layer={}.png'.format(
        tuple(digits), include_raw_layer)
    create_parent_dir(plot_path)
    plt.savefig(plot_path)
    plt.show()
Ejemplo n.º 6
0
def test_translate_plot(max_dist=5, layer='L4'):
    """Plot mean `layer` SDR overlap against image translation distance.

    :param max_dist: maximum shift in pixels along each axis
    :param layer: cortical layer whose SDR stability is measured
    """
    images, labels = load_mnist.load_images(images_number=100)
    cortex = Cortex()
    # One bin per integer Euclidean distance up to sqrt(2) * max_dist.
    n_bins = int(np.sqrt(2 * max_dist ** 2)) + 1
    overlaps = np.zeros(shape=n_bins, dtype=np.float32)
    counts = np.zeros(shape=overlaps.shape, dtype=np.int32)
    for dx in trange(max_dist + 1, desc="Translate image distances"):
        for dy in range(max_dist + 1):
            dist = int(np.sqrt(dx ** 2 + dy ** 2))
            shift = np.array([[1, 0, dx], [0, 1, dy]], dtype=np.float32)
            for image in images:
                overlaps[dist] += compute_translation_overlap(cortex, image,
                                                              shift, layer=layer)
            counts[dist] += len(images)
    overlaps /= counts
    plt.plot(np.arange(len(overlaps)), overlaps, label="retina {}".format(cortex.retina.shape))
    plt.xlabel("Translation distance, px")
    plt.ylabel("Overlap with origin image")
    plt.title("{} SDR stability test: image translation".format(layer))
    plt.legend()
    plt.grid()
    plt.savefig("translation.png")
    plt.show()
Ejemplo n.º 7
0
def run(world, agent, train=True, images_number=1000, digits=(5, 6)):
    """One pass over MNIST: teach the agent (train=True) or score it.

    :param world: environment the agent saccades over
    :param agent: the agent whose cortex learns/predicts
    :param train: if True, associate labels; otherwise predict and report accuracy
    :param images_number: how many images to load
    :param digits: which MNIST digits to use
    """
    images, labels = load_mnist.load_images(images_number,
                                            train=train,
                                            digits=digits)
    n_correct = 0
    n_total = len(labels)
    progress = tqdm(zip(images, labels), desc="train={}".format(train))
    for image, label in progress:
        world.add_image(image)
        agent.cortex.reset_activations()
        # Both phases sense every saccade corner first.
        for corner_xy in world.saccades():
            agent.sense_data(world, position=corner_xy)
        if train:
            agent.cortex.associate(label)
        else:
            n_correct += agent.cortex.predict() == label
    if not train:
        accuracy = float(n_correct) / n_total
        print("Accuracy: {}".format(accuracy))
Ejemplo n.º 8
0
def test_inner_outer_overlap(layer='L4'):
    """Bar-plot within-class vs between-class SDR overlaps per digit.

    Computes a `layer` SDR for every MNIST example, then collects
    overlaps (normalized by the number of active bits) for same-label
    pairs ("inner") and a random ~10% subsample of different-label pairs
    ("outer"), and shows them as a grouped bar chart.
    """
    images, labels = load_mnist.load_images(images_number=100)
    cortex = Cortex()

    def new_buckets():
        # one empty list per MNIST digit 0..9
        return [[] for _ in range(10)]

    examples_sdr = new_buckets()
    progress = tqdm(images, desc="Inner- & outer-examples overlap test")
    for img, label in zip(progress, labels):
        cortex.compute(img, vector=(0, 0, 0))
        examples_sdr[label].append(cortex.V1.layers[layer].cells.copy())

    overlaps_outer = new_buckets()
    overlaps_inner = new_buckets()
    n_bits_active = cortex.V1.layers[layer].get_sparse_bits_count()
    for label, sdrs_same in enumerate(examples_sdr):
        pairwise_same = np.dot(sdrs_same, np.transpose(sdrs_same)) / float(n_bits_active)
        # Strict upper triangle: each same-label pair is counted once.
        same_idx = np.triu_indices_from(pairwise_same, k=1)
        overlaps_inner[label] = pairwise_same[same_idx]
        for label_other in range(label + 1, 10):
            sdrs_other = examples_sdr[label_other]
            # Random ~10% subsample (at least one) of the other class.
            n_sample = max(len(sdrs_other) // 10, 1)
            other_idx = np.random.choice(len(sdrs_other), size=n_sample, replace=False)
            sdrs_other = np.take(sdrs_other, other_idx, axis=0)
            pairwise_other = np.dot(sdrs_same, np.transpose(sdrs_other)) / float(n_bits_active)
            overlaps = pairwise_other[np.triu_indices_from(pairwise_other, k=0)]
            # The same cross-class overlaps contribute to both labels.
            overlaps_outer[label_other].append(overlaps)
            overlaps_outer[label].append(overlaps)

    width = 0.35
    means_inner = [np.mean(ovlp) for ovlp in overlaps_inner]
    stds_inner = [np.std(ovlp) for ovlp in overlaps_inner]
    plt.bar(np.arange(10), means_inner, yerr=stds_inner, label="inner", width=width)
    means_outer = [np.mean(np.vstack(ovlp)) for ovlp in overlaps_outer]
    stds_outer = [np.std(np.vstack(ovlp)) for ovlp in overlaps_outer]
    plt.bar(np.arange(10) + width, means_outer, yerr=stds_outer, label="outer", width=width)
    plt.xticks(np.arange(10))
    plt.title("{} SDR stability test: inner- & outer-examples overlap".format(layer))
    plt.xlabel("Label")
    plt.ylabel("Overlap")
    plt.legend()
    plt.savefig("inner-outer-overlap.png")
    plt.show()
Ejemplo n.º 9
0
def train(digits=(5, 6),
          hidden_sizes=(2000, 10000),
          hidden_sparsity=0.05,
          hidden_thr_overlap=(0.3, 0.1),
          include_raw_layer=False,
          display=False):
    """Train a stack of sparse binary layers with attractors on MNIST.

    Builds a raw input layer (identity weights) plus random-projection
    hidden layers, runs ``_do_train`` to grow attractors, prints a
    per-layer summary, optionally displays attractor reconstructions,
    and saves all layers to ``LAYERS_PATH``.

    :param digits: MNIST digits to train on
    :param hidden_sizes: sizes of the hidden layers
    :param hidden_sparsity: scalar or per-layer sparsity of hidden activations
    :param hidden_thr_overlap: scalar or per-layer fraction of the maximum
        overlap required to merge an input into an existing attractor
    :param include_raw_layer: if True, the raw input layer also learns attractors
    :param display: if True, show a reconstruction of every learned attractor
    """
    images, labels = load_mnist.load_images(images_number=IMAGES_NUMBER,
                                            train=True,
                                            digits=digits)
    images = np.array([cv2.resize(im, (14, 14)) for im in images],
                      dtype=np.uint8)
    x_mean = images.mean()
    # Binarize around the global mean intensity.
    images = (images > x_mean).astype(np.int32)

    n_hidden = len(hidden_sizes)
    # Normalize scalar hyperparameters to per-layer lists BEFORE building the
    # raw layer. The original code computed the raw-layer threshold as
    # `float * hidden_thr_overlap`, which raises TypeError when
    # hidden_thr_overlap is a tuple — which it is by default.
    if not is_iterable(hidden_sparsity):
        hidden_sparsity = [hidden_sparsity] * n_hidden
    if not is_iterable(hidden_thr_overlap):
        hidden_thr_overlap = [hidden_thr_overlap] * max(n_hidden, 1)
    # Raw layer uses the first per-layer threshold (assumed intent when
    # per-layer thresholds are given — TODO confirm).
    thr_raw = hidden_thr_overlap[0]

    input_size = images[0].size
    layers = [
        dict(sparsity=np.mean(images),
             size=input_size,
             x_mean=x_mean,
             digits=digits,
             weights=np.identity(n=input_size, dtype=np.int32),
             threshold=1.0 * input_size * thr_raw,
             include_raw_layer=include_raw_layer,
             attractors=[])
    ]

    for layer_id, size in enumerate(hidden_sizes):
        # Random binary projection from the previous layer.
        w = np.random.binomial(n=1,
                               p=0.1,
                               size=(size, layers[layer_id]['size']))
        sparsity = hidden_sparsity[layer_id]
        max_overlap = sparsity * size
        layers.append(
            dict(sparsity=sparsity,
                 size=size,
                 threshold=hidden_thr_overlap[layer_id] * max_overlap,
                 weights=w,
                 attractors=[]))

    # 0 when the raw layer participates in training, 1 otherwise.
    first_layer_id = 1 - include_raw_layer
    _do_train(layers=layers[first_layer_id:], images=images, labels=labels)

    n_input_unique = len(images)
    for layer_id in range(first_layer_id, len(layers)):
        layer = layers[layer_id]
        n_attractors = len(layer['attractors'])
        print("Layer {}: {} inputs --> {} attractors".format(
            layer_id, n_input_unique, n_attractors))
        n_input_unique = n_attractors

    if display:
        for layer_id in range(first_layer_id, len(layers)):
            layer = layers[layer_id]
            # Clamp to 0: when layer_id == 0 (raw layer, identity weights)
            # the original indexed layers[-1], wrapping to the LAST layer's
            # sparsity instead of the raw layer's own.
            prev_id = max(layer_id - 1, 0)
            sparsity_prev = layers[prev_id]['sparsity']
            for attractor in layer['attractors']:
                w = layer['weights']
                # Back-project the attractor into the previous layer's space.
                x_reconstruction = kWTA(np.dot(w.T, attractor),
                                        sparsity=sparsity_prev)
                side_size = int(np.ceil(np.sqrt(x_reconstruction.size)))
                x_reconstruction.resize(side_size, side_size)
                plt.imshow(x_reconstruction)
                plt.title("Layer {} reconstructed".format(prev_id))
                plt.show()
    np.save(LAYERS_PATH, layers)