Example #1
File: kn.py Project: alex-me/nencog
 def knode( self, category='person' ):
     """
     insert a Nengo node for the Keras interface
     """
     if category in self.nodes:
         raise ValueError( "node's label {} already in use".format( category ) )
     # shapes and Kinter are module-level definitions in kn.py; the
     # flattened input size is the product of the category's image shape
     size_in             = numpy.prod( shapes[ category ] )
     size_out            = shapes[ category ][ 0 ]
     label               = category
     node                = Kinter( category )
     self.nodes[ label ] = nengo_dl.TensorNode( node, size_in=size_in, size_out=size_out, label=label )
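
A minimal usage sketch (hypothetical: the class that owns knode is not shown in this snippet, so the builder name below is assumed):

builder.knode( category='person' )   # registers a TensorNode under the label 'person'
builder.knode( category='person' )   # raises ValueError: label already in use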
Example #2
 def node( self, node, label, ntype='tf' ):
     """
     insert a Nengo node (a TensorNode for ntype='tf', a plain
     nengo.Node for ntype='nengo')
     """
     if label in self.nodes:
         raise ValueError( "node's label {} already in use".format( label ) )
     # n_imgs, n_class and i_size are module-level constants
     if ntype == 'tf':
         size_out            = n_imgs * n_class
         self.nodes[ label ] = nengo_dl.TensorNode( node, size_in=i_size, size_out=size_out, label=label )
     elif ntype == 'nengo':
         self.nodes[ label ] = nengo.Node( node, size_in=i_size, size_out=n_class, label=label )
Example #3
def setup_nn(seed=1):
    """
    build the main Nengo network
    """
    # Vision, i_size and n_class are defined elsewhere in the project
    nn = nengo.Network(seed=seed)
    with nn:
        v = nengo_dl.TensorNode(Vision(),
                                size_in=i_size,
                                size_out=n_class,
                                label="cnn")
        o = nengo.Probe(v, label="cnn_result")
    return nn
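
A minimal sketch of running the network built above, assuming Vision, i_size and n_class exist as in the surrounding project; since setup_nn only returns the network, the probe is recovered from its probes list:

nn = setup_nn(seed=1)
o = nn.probes[0]                    # the "cnn_result" probe
with nengo_dl.Simulator(nn) as sim:
    sim.step()                      # a single forward pass
print(sim.data[o][0])               # class scores at that timestep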
Example #4
def r_nn(image):
    """
    recall (run) the nengo network on the given image
    """
    img = flat_cifar.img_numpy(image)
    with nengo.Network() as nn:
        i = nengo.Node(output=img.flatten())
        v = nengo_dl.TensorNode(Vision(),
                                size_in=i_size,
                                size_out=n_class,
                                label="cnn")
        nengo.Connection(i, v, synapse=None, label="img_to_cnn")
        o = nengo.Probe(v, label="cnn_result")
    # run a single timestep; the probe data remains available after
    # the simulator context closes
    with nengo_dl.Simulator(nn) as sim:
        sim.step()
    return sim.data[o][0]
Example #5
def mnist(use_tensor_layer=True):
    """
    A network designed to stress-test tensor layers (based on mnist net).

    Parameters
    ----------
    use_tensor_layer : bool
        If True, use individual tensor_layers to build the network, as opposed
        to a single TensorNode containing all layers.

    Returns
    -------
    net : `nengo.Network`
        benchmark network
    """

    with nengo.Network() as net:
        # create node to feed in images
        net.inp = nengo.Node(np.ones(28 * 28))

        if use_tensor_layer:
            nengo_nl = nengo.RectifiedLinear()

            ensemble_params = dict(max_rates=nengo.dists.Choice([100]),
                                   intercepts=nengo.dists.Choice([0]))
            amplitude = 1
            synapse = None

            x = nengo_dl.tensor_layer(net.inp,
                                      tf.layers.conv2d,
                                      shape_in=(28, 28, 1),
                                      filters=32,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, nengo_nl, **ensemble_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.conv2d,
                                      shape_in=(26, 26, 32),
                                      transform=amplitude,
                                      filters=32,
                                      kernel_size=3)
            x = nengo_dl.tensor_layer(x, nengo_nl, **ensemble_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.average_pooling2d,
                                      shape_in=(24, 24, 32),
                                      synapse=synapse,
                                      transform=amplitude,
                                      pool_size=2,
                                      strides=2)

            x = nengo_dl.tensor_layer(x, tf.layers.dense, units=128)
            x = nengo_dl.tensor_layer(x, nengo_nl, **ensemble_params)

            x = nengo_dl.tensor_layer(x,
                                      tf.layers.dropout,
                                      rate=0.4,
                                      transform=amplitude)

            x = nengo_dl.tensor_layer(x, tf.layers.dense, units=10)
        else:
            nl = tf.nn.relu

            # def softlif_layer(x, sigma=1, tau_ref=0.002, tau_rc=0.02,
            #                   amplitude=1):
            #     # x -= 1
            #     z = tf.nn.softplus(x / sigma) * sigma
            #     z += 1e-10
            #     rates = amplitude / (tau_ref + tau_rc * tf.log1p(1 / z))
            #     return rates

            @nengo_dl.reshaped((28, 28, 1))
            def mnist_node(_, x):  # pragma: no cover
                x = tf.layers.conv2d(x,
                                     filters=32,
                                     kernel_size=3,
                                     activation=nl)
                x = tf.layers.conv2d(x,
                                     filters=32,
                                     kernel_size=3,
                                     activation=nl)
                x = tf.layers.average_pooling2d(x, pool_size=2, strides=2)
                x = tf.contrib.layers.flatten(x)
                x = tf.layers.dense(x, 128, activation=nl)
                x = tf.layers.dropout(x, rate=0.4)
                x = tf.layers.dense(x, 10)

                return x

            node = nengo_dl.TensorNode(mnist_node,
                                       size_in=28 * 28,
                                       size_out=10)
            x = node
            nengo.Connection(net.inp, node, synapse=None)

        net.p = nengo.Probe(x)

    return net
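
A minimal sketch of exercising this benchmark network, assuming the nengo-dl 2.x API that tensor_layer and tf.layers imply:

net = mnist(use_tensor_layer=True)
with nengo_dl.Simulator(net, minibatch_size=2) as sim:
    sim.run_steps(5)                 # run for five timesteps
print(sim.data[net.p].shape)         # (minibatch, steps, 10) class scores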
Example #6
def three():
    # assumes module-level imports: numpy as np, tensorflow as tf, cv2,
    # pickle, the Keras backend as K, clone_model/load_model from
    # keras.models, MultiLabelBinarizer from sklearn, matplotlib.pyplot
    # as plt, nengo and nengo_dl
    K.clear_session()
    data = []
    """
    labels = [['black', 'jeans']]*344 + [['blue', 'dress']]*386 + [['blue', 'jeans']]*356 + [['blue', 'shirt']]*369 + \
             [['blue', 'sweater']]*99 + [['gray', 'shorts']]*96 + [['red', 'dress']]*380 + [['red', 'shirt']]*332
    """
    EPOCHS = 25
    INIT_LR = 1e-3
    BS = 32
    IMAGE_DIMS = (96, 96, 3)

    checkpoints_dir = '.\\checkpoints'
    # load the image, pre-process it, and store it in the data list
    import glob
    image_types = ('*.jpg', '*.jpeg', '*.png')
    files = []
    labels = []
    folders = glob.glob(
        "D:\\Users\\bob\\PycharmProjects\\test_recommendation\\nengo_classification\\dataset\\*\\"
    )
    for image_type in image_types:
        files.extend(
            glob.glob(
                "D:\\Users\\bob\\PycharmProjects\\test_recommendation\\nengo_classification\\dataset\\*\\"
                + image_type))
    print(len(files))
    for f in files:
        """
        image = cv2.imread(f)
        image = cv2.resize(image, (IMAGE_DIMS[1], IMAGE_DIMS[0]))
        image = img_to_array(image)
        data.append(image)
        """
        label = f.split("\\")[-2].split("_")
        labels.append(label)

    # scale the raw pixel intensities to the range [0, 1]
    data = np.load("data.npy")
    labels = np.array(labels)

    # print("[INFO] data matrix: {} images ({:.2f}MB)".format(len(imagePaths), data.nbytes / (1024 * 1000.0)))

    # binarize the labels using scikit-learn's special multi-label
    # binarizer implementation
    # print("[INFO] class labels:")
    mlb = MultiLabelBinarizer()
    labels = mlb.fit_transform(labels)

    #print(labels)
    """
    # loop over each of the possible class labels and show them
    for (i, label) in enumerate(mlb.classes_):
        print("{}. {}".format(i + 1, label))

    print(data.shape)
    print(labels.shape)
    # partition the data into training and testing splits using 80% of
    # the data for training and the remaining 20% for testing
    (trainX, testX, trainY, testY) = train_test_split(data,
                                                      labels, test_size=0.2, random_state=42)

    # construct the image generator for data augmentation
    aug = ImageDataGenerator(rotation_range=25, width_shift_range=0.1,
                             height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
                             horizontal_flip=True, fill_mode="nearest")

    # initialize the model using a sigmoid activation as the final layer
    # in the network so we can perform multi-label classification
    print("[INFO] compiling model...")
    model = SmallerVGGNet.build(
        width=IMAGE_DIMS[1], height=IMAGE_DIMS[0],
        depth=IMAGE_DIMS[2], classes=len(mlb.classes_),
        finalAct="sigmoid")

    # initialize the optimizer (SGD is sufficient)
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)

    # compile the model using binary cross-entropy rather than
    # categorical cross-entropy -- this may seem counterintuitive for
    # multi-label classification, but keep in mind that the goal here
    # is to treat each output label as an independent Bernoulli
    # distribution
    model.compile(loss="binary_crossentropy", optimizer=opt,
                  metrics=["accuracy"])

    print("[INFO] training network...")
    H = model.fit_generator(
        aug.flow(trainX, trainY, batch_size=BS),
        validation_data=(testX, testY),
        steps_per_epoch=len(trainX) // BS,
        epochs=EPOCHS, verbose=1)

    # save the model to disk
    print("[INFO] serializing network...")
    model.save("fashion_model.h5")
    model.save_weights("fashion_model_weights.h5")

    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="upper left")
    plt.savefig("plot.png")
    """
    class KerasNode:
        def __init__(self, keras_model, mlb):
            self.model = keras_model
            self.mlb = mlb

        def pre_build(self, *args):
            self.model = clone_model(self.model)

        def __call__(self, t, x):
            # pre-process the image for classification
            img = tf.reshape(x, (-1, ) + IMAGE_DIMS)
            #print(img.shape)
            """
            img = cv2.resize(img, (96, 96))
            img = img.astype("float") / 255.0
            img = img_to_array(img)
            img = np.expand_dims(img, axis=0)
            """
            return self.model.call(img)
            # return self.model.call(tf.convert_to_tensor(img, dtype=tf.float32))

        def post_build(self, sess, rng):
            # load the trained weights once the TensorFlow session exists
            with sess.as_default():
                self.model.load_weights("fashion_model_weights.h5")
                self.mlb = pickle.loads(open("mlb.pickle", "rb").read())

    net_input_shape = np.prod((96, 96, 3))  # because input will be a vector

    with nengo.Network() as net:
        # create a normal input node to feed in our test image.
        # the `np.ones` array is a placeholder; these values will be
        # replaced with the dataset images when we run the Simulator.
        input_node = nengo.Node(output=np.ones((net_input_shape, )))

        # create a TensorNode containing the KerasNode we defined
        # above, passing it the Keras model we created.
        # we also need to specify size_in (the dimensionality of
        # our input vectors, the flattened images) and size_out (the number
        # of classification classes output by the keras network)
        model = load_model("fashion_model.h5")
        mlb = pickle.loads(open("mlb.pickle", "rb").read())
        keras_node = nengo_dl.TensorNode(KerasNode(model, mlb),
                                         size_in=net_input_shape,
                                         size_out=len(mlb.classes_))

        # connect up our input to our keras node
        nengo.Connection(input_node, keras_node, synapse=None)

        # add probes to collect the output of the keras node and the input
        keras_p = nengo.Probe(keras_node)
        input_p = nengo.Probe(input_node)

    minibatch_size = 20

    np.random.seed(3)
    test_inds = np.random.randint(low=0,
                                  high=data.shape[0],
                                  size=(minibatch_size, ))
    test_inputs = data[test_inds]

    # flatten images so we can pass them as vectors to the input node
    test_inputs = test_inputs.reshape((-1, net_input_shape))

    # unlike in Keras, NengoDL simulations always run over time,
    # so we need to add the time dimension to our data (even though
    # in this case we'll just run for a single timestep).
    test_inputs = test_inputs[:, None, :]

    with nengo_dl.Simulator(net, minibatch_size=len(test_inputs)) as sim:
        sim.step(data={input_node: test_inputs})

    tensornode_output = sim.data[keras_p]

    for i in range(len(test_inputs)):
        plt.figure()
        b, g, r = cv2.split(data[test_inds[i]])
        rgb_img = cv2.merge([r, g, b])
        plt.imshow(rgb_img)
        print("[INFO] classifying ...")
        proba = tensornode_output[i][0]
        print(proba)
        idxs = np.argsort(proba)[::-1]
        print(idxs)
        print(np.argmax(tensornode_output[i, 0]))
        plt.axis("off")
        plt.title("%s, %s" % (mlb.classes_[idxs[0]], mlb.classes_[idxs[1]]))
        plt.show()
Example #7
            # __init__ of a class that subclasses nengo.Network (note the
            # `with self:` block below)
            def __init__(self, units, order, theta, input_d, **kwargs):
                super().__init__(**kwargs)

                # compute the A and B matrices according to the LMU's mathematical
                # derivation (see the paper for details)
                Q = np.arange(order, dtype=np.float64)
                R = (2 * Q + 1)[:, None] / theta
                j, i = np.meshgrid(Q, Q)

                A = np.where(i < j, -1, (-1.0) ** (i - j + 1)) * R
                B = (-1.0) ** Q[:, None] * R
                C = np.ones((1, order))
                D = np.zeros((1,))

                # discretize the continuous-time system (cont2discrete is
                # scipy.signal.cont2discrete)
                A, B, _, _, _ = cont2discrete((A, B, C, D), dt=1.0, method="zoh")

                with self:
                    nengo_dl.configure_settings(trainable=None)

                    # create objects corresponding to the x/u/m/h variables in LMU
                    self.x = nengo.Node(size_in=input_d)
                    self.u = nengo.Node(size_in=1)
                    self.m = nengo.Node(size_in=order)
                    self.h = nengo_dl.TensorNode(
                        tf.nn.tanh, shape_in=(units,), pass_time=False
                    )

                    # compute u_t
                    # note that setting synapse=0 (versus synapse=None) adds a
                    # one-timestep delay, so we can think of any connections with
                    # synapse=0 as representing value_{t-1}
                    nengo.Connection(
                        self.x, self.u, transform=np.ones((1, input_d)), synapse=None
                    )
                    nengo.Connection(
                        self.h, self.u, transform=np.zeros((1, units)), synapse=0
                    )
                    nengo.Connection(
                        self.m, self.u, transform=np.zeros((1, order)), synapse=0
                    )

                    # compute m_t
                    # in this implementation we'll make A and B non-trainable, but they
                    # could also be optimized in the same way as the other parameters
                    conn = nengo.Connection(self.m, self.m, transform=A, synapse=0)
                    self.config[conn].trainable = False
                    conn = nengo.Connection(self.u, self.m, transform=B, synapse=None)
                    self.config[conn].trainable = False

                    # compute h_t
                    nengo.Connection(
                        self.x,
                        self.h,
                        transform=np.zeros((units, input_d)),
                        synapse=None,
                    )
                    nengo.Connection(
                        self.h, self.h, transform=np.zeros((units, units)), synapse=0
                    )
                    nengo.Connection(
                        self.m,
                        self.h,
                        transform=nengo_dl.dists.Glorot(distribution="normal"),
                        synapse=None,
                    )
Example #8
    def build_network(neuron_type,
                      drop_p,
                      l2_weight,
                      n_units=1024,
                      num_layers=4,
                      output_size=1):
        with nengo.Network() as net:

            use_dropout = bool(drop_p)

            #net.config[nengo.Connection].synapse = None
            #nengo_dl.configure_settings(trainable=False)

            # input node
            inp = nengo.Node([0])

            shape_in = 1
            x = inp

            # the regularizer is a function, so why not reuse it
            reg = tf.contrib.layers.l2_regularizer(l2_weight)

            class DenseLayer(object):
                i = 0

                def pre_build(self, shape_in, shape_out):
                    self.W = tf.get_variable("weights" + str(DenseLayer.i),
                                             shape=(shape_in[1], shape_out[1]),
                                             regularizer=reg)
                    self.B = tf.get_variable("biases" + str(DenseLayer.i),
                                             shape=(1, shape_out[1]),
                                             regularizer=reg)
                    DenseLayer.i += 1

                def __call__(self, t, x):
                    return x @ self.W + self.B

            for n in range(num_layers):
                # add a fully connected layer

                a = nengo_dl.TensorNode(DenseLayer(),
                                        size_in=shape_in,
                                        size_out=n_units,
                                        label='dense{}'.format(n))
                nengo.Connection(x, a, synapse=None)

                shape_in = n_units
                x = a

                # apply an activation function (ens_params is defined
                # elsewhere in the project)
                x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)

                # add a dropout layer
                x = nengo_dl.tensor_layer(x,
                                          tf.layers.dropout,
                                          rate=drop_p,
                                          training=use_dropout)

            # add an output layer
            a = nengo_dl.TensorNode(DenseLayer(),
                                    size_in=shape_in,
                                    size_out=output_size)
            nengo.Connection(x, a, synapse=None)

        return net, inp, a
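
A minimal usage sketch, assuming ens_params is defined at module level as the snippet requires:

net, inp, out = build_network(nengo.RectifiedLinear(),
                              drop_p=0.1, l2_weight=1e-4)
with net:
    p = nengo.Probe(out)
with nengo_dl.Simulator(net, minibatch_size=10) as sim:
    sim.step()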
Example #9
# snippet 5 (section 3.2)
import numpy as np
import tensorflow as tf

# `net`, `a`, `b`, and `p` are defined in the paper's earlier snippets
inputs = np.random.randn(50, 1000, 1)
targets = inputs**2

with nengo_dl.Simulator(net, minibatch_size=10) as sim:
    sim.train(inputs={a: inputs},
              targets={p: targets},
              optimizer=tf.train.AdamOptimizer(),
              n_epochs=2,
              objective="mse")

# snippet 6 (section 3.3)
with net:

    def tensor_func(t, x):
        return tf.layers.dense(x, 100, activation=tf.nn.relu)

    t = nengo_dl.TensorNode(tensor_func, size_in=1)
    nengo.Connection(a, t)
    nengo.Connection(t, b.neurons)

# snippet 7 (section 3.3)
with net:
    t = nengo_dl.tensor_layer(a,
                              tf.layers.dense,
                              units=100,
                              activation=tf.nn.relu)
    nengo.Connection(t, b.neurons)
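
Snippet 7 is the tensor_layer shorthand for snippet 6: both insert the same dense ReLU layer between a and b.neurons. A minimal sketch of running the augmented network, assuming net and p are the objects from the earlier snippets:

with nengo_dl.Simulator(net, minibatch_size=10) as sim:
    sim.run_steps(10)
output = sim.data[p]   # (minibatch, steps, probe dimensions)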