Example #1
    def Evaluate(dataset, results, name):
        waveforms = {"mixes": [], "ann": [], "nengo": []}
        targets = []
        # possibly not the most efficient, but this keeps the dataset from
        # being loaded multiple times. It might be worth checking whether
        # batch execution is faster, but for some reason it was only
        # returning a single output for me
        for mixture, target in dataset:
            targets.append(target.numpy())
            waveforms["mixes"].append(mixture.numpy()[:, 0].reshape(-1))
            print("Calulating ann output")
            waveforms["ann"].append(model.predict(mixture).reshape(-1))
            print("Calulating nengo output")
            with nengo_dl.Simulator(snnModel) as snnNet:
                snnData = snnNet.predict(
                    {snnInLayer: mixture.numpy()[None, :, :]})
                waveforms["nengo"].append(
                    snnData[snnEnsembleProbe][0].reshape(-1))

        for key in waveforms:
            print(f"Measuring SNR: {key}")
            mean, stdDev = meanSNR(waveforms[key], targets)
            results[f"{name}: {key} Mean SNR"] = mean
            results[f"{name}: {key} StdDev SNR"] = stdDev

            print(f"Measuring norm SNR: {key}")
            mean, stdDev = meanSNRnorm(waveforms[key], targets)
            results[f"{name}: {key} Normed Mean SNR"] = mean
            results[f"{name}: {key} Normed StdDev SNR"] = stdDev

        return results
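The meanSNR and meanSNRnorm helpers are not shown in this snippet; a minimal sketch of the unnormalized one, assuming SNR in dB reduced to a mean and standard deviation across examples:

import numpy as np

def meanSNR(estimates, targets):
    # hypothetical helper, not from the original source
    snrs = []
    for est, tgt in zip(estimates, targets):
        tgt = np.asarray(tgt).reshape(-1)[:len(est)]
        noise = est - tgt
        snrs.append(10 * np.log10(np.sum(tgt ** 2) / np.sum(noise ** 2)))
    return np.mean(snrs), np.std(snrs)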
Example #2
def test_nengo_dl_noise(neuron_type, seed, plt, allclose):
    pytest.importorskip("tensorflow")

    install_dl_builders()

    net, rates, _ = rate_nengo_dl_net(neuron_type)
    n_noise = 1000  # number of noise samples per x point

    with net:
        nengo_dl.configure_settings(learning_phase=True)  # run with `training=True`

    with nengo_dl.Simulator(net, dt=net.dt, minibatch_size=n_noise, seed=seed) as sim:
        input_data = {net.stim: np.tile(net.x[None, None, :], (n_noise, 1, 1))}
        sim.step(data=input_data)
        y = sim.data[net.probe][:, 0, :]

    ymean = y.mean(axis=0)
    y25 = np.percentile(y, 25, axis=0)
    y75 = np.percentile(y, 75, axis=0)
    dy25 = y25 - rates["ref"]
    dy75 = y75 - rates["ref"]

    # exponential models roughly fitted to 25/75th percentiles
    x1mask = net.x > 1.5
    x1 = net.x[x1mask]
    if isinstance(neuron_type.nengo_dl_noise, AlphaRCNoise):
        exp_model = 0.7 + 2.8 * np.exp(-0.22 * (x1 - 1))
        atol = 0.12 * exp_model.max()
    elif isinstance(neuron_type.nengo_dl_noise, LowpassRCNoise):
        exp_model = 1.5 + 2.2 * np.exp(-0.22 * (x1 - 1))
        atol = 0.2 * exp_model.max()

    rtol = 0.2
    mu_atol = 0.6  # depends on n_noise and variance of noise

    # --- plots
    plt.subplot(211)
    plt.plot(net.x, rates["med"], "--", label="LIF(tau_ref += 0.5*dt)")
    plt.plot(net.x, ymean, label="nengo_dl")
    plt.plot(net.x, y25, ":", label="25th")
    plt.plot(net.x, y75, ":", label="75th")
    plt.plot(net.x, rates["ref"], "k--", label="LoihiLIF")
    plt.legend()

    plt.subplot(212)
    plt.plot(net.x, ymean - rates["ref"], "b", label="mean")
    plt.plot(net.x, mu_atol * np.ones_like(net.x), "b:")
    plt.plot(net.x, -mu_atol * np.ones_like(net.x), "b:")
    plt.plot(net.x, y25 - rates["ref"], ":", label="25th")
    plt.plot(net.x, y75 - rates["ref"], ":", label="75th")
    plt.plot(x1, exp_model, "k--")
    plt.plot(x1, exp_model * (1 + rtol) + atol, "k:")
    plt.plot(x1, exp_model * (1 - rtol) - atol, "k:")
    plt.plot(x1, -exp_model, "k--")
    plt.legend()

    assert ymean.shape == rates["ref"].shape
    assert allclose(ymean, rates["ref"], atol=mu_atol, record_rmse=False)
    assert allclose(dy25[x1mask], -exp_model, atol=atol, rtol=rtol, record_rmse=False)
    assert allclose(dy75[x1mask], exp_model, atol=atol, rtol=rtol, record_rmse=False)
Example #3
    def __init__(self,
                 process_list=None,
                 scheduler_mode="RR",
                 rr_time_slice=5,
                 num_cores=4096,
                 use_dl=False):
        self.calc_n_neurons = 4096
        self.clock_neurons = 2048
        self.num_cores = num_cores
        self.probes = []
        if rr_time_slice < 0:
            rr_time_slice = 3
        if scheduler_mode == "FCFS":
            rr_time_slice = 99999
        self.rr_time_slice = rr_time_slice

        if process_list is None:
            queue_nodes = QueueNode()
        else:
            queue_nodes = QueueNode(process_list, rr_time_slice)
        self.queue_nodes = queue_nodes
        #self.main_scheduler()
        self.model_two()
        if use_dl:
            import nengo_dl
            self.sim = nengo_dl.Simulator(self.model)
        else:
            self.sim = nengo.Simulator(self.model)
        global g_num_cores
        g_num_cores = num_cores
Example #4
def profiling():
    """Run profiler on one of the benchmarks."""

    # note: in order for GPU profiling to work, you have to manually add
    # ...\CUDA\v8.0\extras\CUPTI\libx64 to your path
    net, p = pes(128, 32, nengo.RectifiedLinear())
    with nengo_dl.Simulator(net,
                            tensorboard=False,
                            unroll_simulation=50,
                            device="/gpu:0") as sim:
        sim.run_steps(150, profile=True)
Example #5
    def predict(self, inputs):

        self.sim = nengo_dl.Simulator(self.net, minibatch_size=1)
        # self.sim.load_params(self.param_file)
        self.sim.compile(loss={self.out_p_filt: mse_loss})
        # self.n_steps = n_steps

        tiled_input = np.tile(inputs[:, None, :], (1, self.n_steps, 1))

        pred_eval = self.sim.predict(tiled_input)
        return pred_eval[self.out_p_filt][:, 10:, :].mean(axis=1)
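The mse_loss passed to sim.compile is not defined in this snippet; a plausible minimal definition (an assumption, not the author's code):

import tensorflow as tf

def mse_loss(y_true, y_pred):
    # hypothetical: mean squared error over all timesteps and dimensions
    return tf.reduce_mean(tf.square(y_true - y_pred))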
Example #6
def recall_nn( image ):
    """
    recall the nengo network on the given image, and return the simulation data
    """
    if models is None:
        return None
    imgs    = read_image( image )
    data    = in_windows( imgs )
    with nengo_dl.Simulator( nn.net, progress_bar=True ) as sim:
        sim.step( data=data )
    return sim.data
Example #7
def recall_dl( data ):
    """
    recall the nengo DL network and return the simulation data
    """
    p   = {}
    pb  = verbose >= 2
    with nengo_dl.Simulator( nn.dl, progress_bar=pb ) as sim:
        sim.step( data=data )
        for c in nn.categories:
            probe   = nn.get_probe( c, 'dl' )
            p[ c ]  = sim.data[ probe ][ 0 ]
    return p
Example #8
def run_profile(net, train=False, n_steps=150, do_profile=True, **kwargs):
    """
    Run profiler on a benchmark network.

    Parameters
    ----------
    net : :class:`~nengo:nengo.Network`
        The nengo Network to be profiled.
    train : bool, optional
        If True, profile the ``sim.train`` function. Otherwise, profile the
        ``sim.run`` function.
    n_steps : int, optional
        The number of timesteps to run the simulation.
    do_profile : bool, optional
        Whether or not to run profiling

    Notes
    -----
    kwargs will be passed on to :class:`.Simulator`
    """

    with net:
        nengo_dl.configure_settings(trainable=None if train else False)

    with nengo_dl.Simulator(net, **kwargs) as sim:
        # note: we run a few times to try to eliminate startup overhead (only
        # the data from the last run will be kept)
        if train:
            opt = tf.train.GradientDescentOptimizer(0.001)
            x = np.random.randn(sim.minibatch_size, n_steps, net.inp.size_out)
            y = np.random.randn(sim.minibatch_size, n_steps, net.p.size_in)

            for _ in range(2):
                sim.train({net.inp: x}, {net.p: y},
                          optimizer=opt,
                          n_epochs=1,
                          profile=do_profile)

            start = time.time()
            sim.train({net.inp: x}, {net.p: y},
                      optimizer=opt,
                      n_epochs=1,
                      profile=do_profile)
            print("Execution time:", time.time() - start)

        else:
            for _ in range(2):
                sim.run_steps(n_steps, profile=do_profile)

            start = time.time()
            sim.run_steps(n_steps, profile=do_profile)
            print("Execution time:", time.time() - start)
Example #9
def run_snn(model,
            x_test,
            y_test,
            params_load_path,
            iteration,
            timesteps=50,
            scale_firing_rates=1000,
            synapse=0.01,
            batch_size=16):
    """
    Run model in spiking setting
    :param batch_size: batch size
    :param model: model reference
    :param x_test: testing features
    :param y_test: testing labels
    :param params_load_path: path to load parameters
    :param iteration: number of current iteration
    :param timesteps: number of timesteps
    :param scale_firing_rates: firing rate scaling
    :param synapse: synaptic smoothing
    :return: accuracy, precision, recall, f1 and confusion matrix from the testing data
    """
    converter = nengo_dl.Converter(
        model,
        swap_activations={tf.nn.relu: nengo.SpikingRectifiedLinear()},
        scale_firing_rates=scale_firing_rates,
        synapse=synapse
    )  # create a Nengo converter object and swap all relu activations with spiking relu

    with converter.net:
        nengo_dl.configure_settings(stateful=False)

    output_layer = converter.outputs[model.get_layer(
        'output_layer')]  # output layer for simulator

    x_test_tiled = np.tile(x_test,
                           (1, timesteps, 1))  # tile test data to timesteps

    with nengo_dl.Simulator(converter.net) as simulator:
        simulator.load_params(params_load_path)

        # Get the statistics
        accuracy, precision, recall, f1, confusion_matrix = get_metrics(
            simulator, output_layer, x_test_tiled, y_test, batch_size,
            f'{iteration}. CNN (SNN conversion)')
        return {
            'accuracy': accuracy,
            'precision': precision,
            'recall': recall,
            'f1': f1,
            'confusion_matrix': confusion_matrix
        }
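The get_metrics helper is not shown; a minimal sketch of what it might do (assumed names and reduction window, scored with scikit-learn):

import numpy as np
from sklearn import metrics

def get_metrics(simulator, probe, x, y_true, batch_size, name):
    # hypothetical helper: classify from the mean output over the final
    # ten timesteps (batch_size kept for signature compatibility only)
    out = simulator.predict(x)[probe]
    y_pred = np.argmax(out[:, -10:, :].mean(axis=1), axis=-1)
    print(f"{name}: evaluated {len(y_pred)} samples")
    return (
        metrics.accuracy_score(y_true, y_pred),
        metrics.precision_score(y_true, y_pred, average="macro"),
        metrics.recall_score(y_true, y_pred, average="macro"),
        metrics.f1_score(y_true, y_pred, average="macro"),
        metrics.confusion_matrix(y_true, y_pred),
    )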
Example #10
    def __init__(self, desired_output=0):
        super(MNISTClassEnv, self).__init__()
        self.__version__ = "0.1.0"
        logging.info("MNIST Classification Brain - Version {}".format(
            self.__version__))

        # model specific vars
        self.desired_output = desired_output  # TODO part of the input/state
        self.n_steps = 30
        self.stim_steps = 5  # TODO if not working, try stim_steps=30, so only one decision per episode
        self.minibatch_size = 1  # TODO implement stimulation on multiple images at the same time
        self.output = np.zeros(10)
        self.output_norm = np.zeros(10)
        self.action = None
        self.reward = None

        # load and net, init sim
        self.net = self._build_net()
        self.sim = nengo_dl.Simulator(self.net,
                                      minibatch_size=self.minibatch_size)

        self.train_data, self.test_data = self._load_data('mnist.pkl.gz')
        self.train_data = {
            self.inp: self.train_data[0][:, None, :],
            self.out_p: self.train_data[1][:, None, :]
        }
        self.test_data = {
            self.inp:
            np.tile(self.test_data[0][:self.minibatch_size * 2, None, :],
                    (1, self.stim_steps, 1)),
            self.out_p_filt:
            np.tile(self.test_data[1][:self.minibatch_size * 2, None, :],
                    (1, self.stim_steps, 1))
        }
        self.rand_test_data = np.random.choice(
            self.test_data[self.inp].shape[0], self.minibatch_size)

        # gym specific vars
        self.TOTAL_TIME_STEPS = 2
        self.action_space = gym.spaces.Box(
            low=0, high=1, shape=(10, ),
            dtype=np.float32)  # gym.spaces.MultiBinary(10)
        # self.observation_space = gym.spaces.Tuple((gym.spaces.Discrete(10),
        #                                            gym.spaces.Box(low=0, high=1, shape=(10,), dtype=np.float32)))
        self.observation_space = gym.spaces.Box(low=0,
                                                high=1,
                                                shape=(10, ),
                                                dtype=np.float32)
        self.curr_step = -1
        self.curr_episode = -1
        self.action_episode_memory = []
Example #11
def recall_nn(image, t=0.2):
    """
    recall the nengo network on the given image
    """
    if classes is None:
        setup()
    img = cnc.read_image(image)
    v = nn.nodes["cnn1"]
    with nn.net:
        i = nengo.Node(output=img.flatten())
        nengo.Connection(i, v, synapse=None, label="img_to_cnn")
    with nengo_dl.Simulator(nn.net, progress_bar=True) as sim:
        sim.run(t)
    return sim.data
Example #12
def recall_nn(image, nn=None):
    """
    recall the nengo network on the given image
    """
    img = flat_cifar.img_numpy(image)
    if nn is None:
        nn = setup_nn()
    v = nn.nodes[0]
    o = nn.probes[0]
    with nn:
        i = nengo.Node(output=img.flatten())
        nengo.Connection(i, v, synapse=None, label="img_to_cnn")
    with nengo_dl.Simulator(nn, progress_bar=True) as sim:
        sim.step()
    return sim.data[o][0]
Example #13
def r_nn(image):
    """
    recall the nengo network on the given image
    """
    img = flat_cifar.img_numpy(image)
    with nengo.Network() as nn:
        i = nengo.Node(output=img.flatten())
        v = nengo_dl.TensorNode(Vision(),
                                size_in=i_size,
                                size_out=n_class,
                                label="cnn")
        nengo.Connection(i, v, synapse=None, label="img_to_cnn")
        o = nengo.Probe(v, label="cnn_result")
    with nengo_dl.Simulator(nn) as sim:
        sim.step()
    return sim.data[o][0]
Example #14
def test_nengo_dl_neurons(neuron_type, inference_only, Simulator, plt, allclose):
    install_dl_builders()

    dt = 0.0007

    n = 256
    encoders = np.ones((n, 1))
    gain = np.zeros(n)
    if isinstance(neuron_type, nengo.SpikingRectifiedLinear):
        bias = np.linspace(0, 1001, n)
    else:
        bias = np.linspace(0, 30, n)

    with nengo.Network() as model:
        nengo_dl.configure_settings(inference_only=inference_only)

        a = nengo.Ensemble(
            n, 1, neuron_type=neuron_type, encoders=encoders, gain=gain, bias=bias
        )
        ap = nengo.Probe(a.neurons)

    t_final = 1.0
    with nengo_dl.Simulator(model, dt=dt) as dl_sim:
        dl_sim.run(t_final)

    with Simulator(model, dt=dt) as loihi_sim:
        loihi_sim.run(t_final)

    rates_dlsim = (dl_sim.data[ap] > 0).sum(axis=0) / t_final
    rates_loihisim = (loihi_sim.data[ap] > 0).sum(axis=0) / t_final

    zeros = np.zeros((1, gain.size))
    rates_ref = neuron_type.rates(zeros, gain, bias, dt=dt).squeeze(axis=0)
    plt.plot(bias, rates_loihisim, "r", label="loihi sim")
    plt.plot(bias, rates_dlsim, "b-.", label="dl sim")
    plt.plot(bias, rates_ref, "k--", label="rates_ref")
    plt.legend(loc="best")

    atol = 1.0 / t_final  # the fundamental unit for our rates
    assert rates_ref.shape == rates_dlsim.shape == rates_loihisim.shape
    assert allclose(rates_dlsim, rates_ref, atol=atol, rtol=0, xtol=1)
    assert allclose(rates_loihisim, rates_ref, atol=atol, rtol=0, xtol=1)
Example #15
    def predict(self, inputs):

        # param_file = 'networks/policy_params_1000000samples_250epochs'
        # dim = 256
        # maze_id_dim = 256
        # hidden_size = 1024
        # net_seed = 13
        # n_steps = 30
        # self.net = nengo.Network(seed=net_seed)
        # with self.net:
        #     # set some default parameters for the neurons that will make
        #     # the training progress more smoothly
        #     # net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
        #     # net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
        #     self.net.config[nengo.Connection].synapse = None
        #     neuron_type = nengo.LIF(amplitude=0.01)
        #
        #     # this is an optimization to improve the training speed,
        #     # since we won't require stateful behaviour in this example
        #     nengo_dl.configure_settings(stateful=False)
        #
        #     # the input node that will be used to feed in (context, location, goal)
        #     inp = nengo.Node(np.zeros((dim * 2 + maze_id_dim,)))
        #
        #     x = nengo_dl.Layer(tf.keras.layers.Dense(units=hidden_size))(inp)
        #     x = nengo_dl.Layer(neuron_type)(x)
        #
        #     out = nengo_dl.Layer(tf.keras.layers.Dense(units=2))(x)
        #
        #     self.out_p = nengo.Probe(out, label="out_p")
        #     self.out_p_filt = nengo.Probe(out, synapse=0.1, label="out_p_filt")
        #
        self.sim = nengo_dl.Simulator(self.net, minibatch_size=1)
        self.sim.load_params(self.param_file)
        self.sim.compile(loss={self.out_p_filt: mse_loss})
        # self.n_steps = n_steps

        tiled_input = np.tile(inputs[:, None, :], (1, self.n_steps, 1))

        pred_eval = self.sim.predict(tiled_input)
        return pred_eval[self.out_p_filt][:, 10:, :].mean(axis=1)
Example #16
def train(params_file="./keras_to_loihi_params", epochs=1, **kwargs):
    converter = nengo_dl.Converter(model, **kwargs)

    with nengo_dl.Simulator(converter.net, seed=0, minibatch_size=100) as sim:
        sim.compile(
            optimizer=tf.keras.optimizers.Adam(),
            loss={
                converter.outputs[output]: tf.keras.losses.MeanSquaredError()
            },
            metrics={
                converter.outputs[output]: tf.keras.metrics.MeanSquaredError()
            },
        )
        sim.fit(
            {converter.inputs[inp]: train_data},
            {converter.outputs[output]: train_truth},
            epochs=epochs,
        )

        # save the parameters to file
        sim.save_params(params_file)
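A plausible way to call the helper above (the extra keyword arguments are forwarded to nengo_dl.Converter; these particular values are assumptions, not the author's):

train(
    params_file="./keras_to_loihi_params",
    epochs=2,
    swap_activations={tf.nn.relu: nengo.RectifiedLinear()},
)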
Example #17
    def nengo_run(self, testX):
        self.testX = testX
        print('preparing mnist data')

        # convert the saved model to a Nengo model
        otn = toNengoModel(self.model_path)
        model = otn.get_model()
        inp = otn.get_inputProbe()
        pre_layer = otn.get_endLayer()

        # run the network
        with model:
            out_p = nengo.Probe(pre_layer)
            out_p_filt = nengo.Probe(pre_layer, synapse=0.01)

        # ----------------------------------------------------------- run
        sim = nengo_dl.Simulator(model, device="/cpu:0")

        # when testing our network with spiking neurons we will need to run it
        # over time, so we repeat the input/target data for a number of
        # timesteps.

        n_steps = 30
        print(self.testX.shape)  # 30, 28, 28, 1
        self.testX = self.testX.reshape((self.testX.shape[0], -1))
        print(self.testX.shape)  # 30, 784
        test_images = np.tile(self.testX[:, None, :], (1, n_steps, 1))
        print(test_images.shape)

        # load parameters
        print('load_params')
        sim.load_params("weights/mnist_params_adam_0.001_3_100")

        sim.compile(loss={out_p_filt: classification_accuracy})
        data = sim.predict(test_images)
        sim.close()
        print('simulator closed')
        return data
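The classification_accuracy loss compiled above is not defined in this snippet; one common definition (following the NengoDL examples, assumed here) scores only the final timestep of the filtered output:

def classification_accuracy(y_true, y_pred):
    # compare labels against the network output at the last timestep only
    return tf.metrics.sparse_categorical_accuracy(y_true[:, -1], y_pred[:, -1])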
Example #18
def train(params_file="./keras_to_loihi_params", epochs=1, **kwargs):
    converter = nengo_dl.Converter(model, **kwargs)

    with nengo_dl.Simulator(converter.net, seed=0, minibatch_size=200) as sim:
        sim.compile(
            optimizer=tf.optimizers.RMSprop(0.001),
            loss={
                converter.outputs[dense1]:
                tf.losses.SparseCategoricalCrossentropy(from_logits=True)
            },
            metrics={
                converter.outputs[dense1]:
                tf.metrics.sparse_categorical_accuracy
            },
        )
        sim.fit(
            {converter.inputs[inp]: train_images},
            {converter.outputs[dense1]: train_labels},
            epochs=epochs,
        )

        # save the parameters to file
        sim.save_params(params_file)
Example #19
    def __init__(self,
                 param_file,
                 dim=256,
                 maze_id_dim=256,
                 n_sensors=36,
                 hidden_size=1024,
                 net_seed=13,
                 n_steps=30):
        self.net = nengo.Network(seed=net_seed)
        with self.net:
            # set some default parameters for the neurons that will make
            # the training progress more smoothly
            # net.config[nengo.Ensemble].max_rates = nengo.dists.Choice([100])
            # net.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            self.net.config[nengo.Connection].synapse = None
            neuron_type = nengo.LIF(amplitude=0.01)

            # this is an optimization to improve the training speed,
            # since we won't require stateful behaviour in this example
            nengo_dl.configure_settings(stateful=False)

            # the input node that will be used to feed in (context, location, goal)
            inp = nengo.Node(np.zeros((n_sensors * 4 + maze_id_dim, )))

            x = nengo_dl.Layer(tf.keras.layers.Dense(units=hidden_size))(inp)
            x = nengo_dl.Layer(neuron_type)(x)

            out = nengo_dl.Layer(tf.keras.layers.Dense(units=dim))(x)

            self.out_p = nengo.Probe(out, label="out_p")
            self.out_p_filt = nengo.Probe(out, synapse=0.1, label="out_p_filt")

        self.sim = nengo_dl.Simulator(self.net, minibatch_size=1)
        self.sim.load_params(param_file)
        self.sim.compile(loss={self.out_p_filt: mse_loss})
        self.n_steps = n_steps
Example #20
    def convert(self, add_probes=True, synapse=None, **kwargs):
        """ Run the NengoDL Converter on the above Keras net

        add_probes : bool, optional (Default: True)
            if False, no probes are added to the model, which reduces simulation overhead
        """
        converter = nengo_dl.Converter(self.model, **kwargs)

        # create references to some nengo objects in the network IO objects
        self.nengo_input = converter.inputs[self.input]
        self.nengo_dense = converter.outputs[self.dense]

        net = converter.net

        self.input = converter.layers[self.input]
        self.conv0 = converter.layers[self.conv0]
        self.conv1 = converter.layers[self.conv1]
        self.output = converter.layers[self.dense]

        with net:
            # set our biases to non-trainable to make sure they're always 0
            net.config[self.conv0].trainable = False
            net.config[self.conv1].trainable = False

            if add_probes:
                # set up probes so the firing rates can be added to the cost function
                self.probe_conv0 = nengo.Probe(self.conv0, label="probe_conv0")
                self.probe_conv1 = nengo.Probe(self.conv1, label="probe_conv1")
                self.probe_dense = nengo.Probe(self.output,
                                               label="probe_dense",
                                               synapse=synapse)

        sim = nengo_dl.Simulator(net,
                                 minibatch_size=self.minibatch_size,
                                 seed=self.seed)
        return sim, net
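A hedged usage sketch for convert; the wrapper instance, parameter file, and synapse value are assumptions:

sim, net = wrapper.convert(add_probes=True, synapse=0.005)
with sim:
    sim.load_params("trained_params")  # hypothetical saved parameters
    sim.step()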
Example #21
    def __init__(self, num_classes, num_layers, num_filters, kernel_sizes,
                input_size = (28, 28, 1),
                minibatch_size = 1,
                n_steps = 30,
                padding_value = 0.0,
                synapse = 0.1):

        self.num_classes = positive_int_check(num_classes, 'num_classes')
        self.num_layers = positive_int_check(num_layers, 'num_layers')

        assert len(num_filters) == num_layers
        assert len(kernel_sizes) == num_layers
        self.num_filters = num_filters
        self.kernel_sizes = kernel_sizes
        assert len(input_size) == 3
        self.input_size = input_size

        self.minibatch_size = positive_int_check(minibatch_size, 'minibatch_size')
        self.n_steps =  positive_int_check(n_steps, 'n_steps')
        self.padding_value = padding_value
        self.synapse = synapse

        self.inp, self.out_p, self.out_p_filt, net = self._build(num_classes, num_layers, num_filters, kernel_sizes)
        self.sim = nengo_dl.Simulator(net, minibatch_size = minibatch_size)
Example #22
        #     stim_to_out = nengo.Connection(out_stim[s], outen[s])#, transform=np.eye(10))
        # x_to_out = nengo.Connection(x, outen, synapse=None, transform=np.eye(10))
        # # TODO check https://forum.nengo.ai/t/input-current-in-a-neuron/468 if needed
        # #  or search: https://forum.nengo.ai/search?q=stimulation
        # x = outen

        # we'll create two different output probes, one with a filter
        # (for when we're simulating the network over time and
        # accumulating spikes), and one without (for when we're
        # training the network using a rate-based approximation)
        out_p = nengo.Probe(x)
        out_p_filt = nengo.Probe(x, synapse=0.1)


    minibatch_size = 200
    sim = nengo_dl.Simulator(net, minibatch_size=minibatch_size)
    # sim.freeze_params(stim_conns)

    # add the single timestep to the training data
    train_data = {inp: train_data[0][:, None, :],
                  out_p: train_data[1][:, None, :]}

    # when testing our network with spiking neurons we will need to run it
    # over time, so we repeat the input/target data for a number of
    # timesteps. we're also going to reduce the number of test images, just
    # to speed up this example.
    n_steps = 30
    test_data = {
        inp: np.tile(test_data[0][:minibatch_size*2, None, :],
                     (1, n_steps, 1)),
    out_p_filt: np.tile(test_data[1][:minibatch_size*2, None, :],
                        (1, n_steps, 1))
}
Example #23
                              weight_init(shape=(n_neurons, inp_dim)))

    conn_rec = nengo.Connection(ens.neurons,
                                ens.neurons,
                                transform=0 *
                                weight_init(shape=(n_neurons, n_neurons)))

    conn_b = nengo.Connection(
        ens.neurons,
        out,
        transform=0 * weight_init(shape=(out_dim, n_neurons)) / mem_tau)

    probe_out = nengo.Probe(out, synapse=0.01)
    probe_spikes = nengo.Probe(ens.neurons)

with nengo_dl.Simulator(net) as sim:
    if (os.path.exists("Resources/trained_model.npz")):
        sim.load_params("Resources/trained_model")
        print("Loaded simulation parameters")
    else:
        assert (False), "Failed to load trained network"

    sample_xor_input, target, _ = generate_xor_sample(duration, dt=dt)
    # - Reshape to (num_batches, time_steps, num_dimensions)
    sample_xor_input = np.reshape(sample_xor_input,
                                  newshape=(1, len(sample_xor_input), 1))
    sim.run(sample_xor_input.shape[1] * dt, data={inp: sample_xor_input})

    # - Do some plotting
    fig = plt.figure(figsize=(10, 5))
    plt.subplot(121)
Example #24
    def evaluate(self, p, plt):
        files = []
        sets = []
        for f in os.listdir(p.dataset_dir):
            if f.endswith('events'):
                files.append(os.path.join(p.dataset_dir, f))

        if p.test_set == 'one':
            test_file = random.sample(files, 1)[0]
            files.remove(test_file)
        
        if p.n_data != -1:
            files = random.sample(files, p.n_data)
            
        inputs = []
        targets = []
        for f in files:
            times, imgs, targs = davis_tracking.load_data(f, dt=p.dt, decay_time=p.decay_time,
                                                  separate_channels=p.separate_channels, 
                                                  saturation=p.saturation, merge=p.merge)
            inputs.append(imgs)
            targets.append(targs[:,:2])
                                
        inputs_all = np.vstack(inputs)
        targets_all = np.vstack(targets)
        
        if p.test_set == 'odd':
            inputs_train = inputs_all[::2]
            inputs_test = inputs_all[1::2]
            targets_train = targets_all[::2]
            targets_test = targets_all[1::2]
            dt_test = p.dt*2
        elif p.test_set == 'one':
            times, imgs, targs = davis_tracking.load_data(test_file, dt=p.dt_test, decay_time=p.decay_time,
                                                  separate_channels=p.separate_channels, 
                                                  saturation=p.saturation, merge=p.merge)
            inputs_test = imgs
            targets_test = targs[:, :2]
            inputs_train = inputs_all
            targets_train = targets_all
            dt_test = p.dt_test
            
        if p.augment:
            inputs_train, targets_train = davis_tracking.augment(inputs_train, targets_train,
                                                                 separate_channels=p.separate_channels)                
                      
        if p.separate_channels:
            shape = (2, 180//p.merge, 240//p.merge)
        else:
            shape = (1, 180//p.merge, 240//p.merge)
        
        dimensions = shape[0]*shape[1]*shape[2]

        
        if p.normalize:
            magnitude = np.linalg.norm(inputs_train.reshape(-1, dimensions), axis=1)
            inputs_train = inputs_train*(1.0/magnitude[:,None,None])
            
            magnitude = np.linalg.norm(inputs_test.reshape(-1, dimensions), axis=1)
            inputs_test = inputs_test*(1.0/magnitude[:,None,None])
                    
        
        
        max_rate = 100
        amp = 1 / max_rate

        model = nengo.Network()
        with model:
            model.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear(amplitude=amp)
            model.config[nengo.Ensemble].max_rates = nengo.dists.Choice([max_rate])
            model.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            model.config[nengo.Connection].synapse = None

            inp = nengo.Node(
                nengo.processes.PresentInput(inputs_test.reshape(-1, dimensions), dt_test),
                size_out=dimensions,
                )

            out = nengo.Node(None, size_in=2)
            
            if not p.split_spatial:
                # do a standard convnet
                conv1 = nengo.Convolution(p.n_features_1, shape, channels_last=False, strides=(p.stride_1,p.stride_1),
                                          kernel_size=(p.kernel_size_1, p.kernel_size_1))
                layer1 = nengo.Ensemble(conv1.output_shape.size, dimensions=1)
                nengo.Connection(inp, layer1.neurons, transform=conv1)

                conv2 = nengo.Convolution(p.n_features_2, conv1.output_shape, channels_last=False, strides=(p.stride_2,p.stride_2),
                                          kernel_size=(p.kernel_size_2, p.kernel_size_2))
                layer2 = nengo.Ensemble(conv2.output_shape.size, dimensions=1)
                nengo.Connection(layer1.neurons, layer2.neurons, transform=conv2)

                nengo.Connection(layer2.neurons, out, transform=nengo_dl.dists.Glorot())
            else:
                # do the weird spatially split convnet
                convnet = davis_tracking.ConvNet(nengo.Network())
                convnet.make_input_layer(
                        shape,
                        spatial_stride=(p.spatial_stride, p.spatial_stride), 
                        spatial_size=(p.spatial_size,p.spatial_size))
                nengo.Connection(inp, convnet.input)
                convnet.make_middle_layer(n_features=p.n_features_1, n_parallel=p.n_parallel, n_local=1,
                                          kernel_stride=(p.stride_1,p.stride_1), kernel_size=(p.kernel_size_1,p.kernel_size_1))
                convnet.make_middle_layer(n_features=p.n_features_2, n_parallel=p.n_parallel, n_local=1,
                                          kernel_stride=(p.stride_2,p.stride_2), kernel_size=(p.kernel_size_2,p.kernel_size_2))
                convnet.make_output_layer(2)
                nengo.Connection(convnet.output, out)
                         

            p_out = nengo.Probe(out)


        N = len(inputs_train)
        n_steps = int(np.ceil(N/p.minibatch_size))
        dl_train_data = {inp: np.resize(inputs_train, (p.minibatch_size, n_steps, dimensions)),
                         p_out: np.resize(targets_train, (p.minibatch_size, n_steps, 2))}
        N = len(inputs_test)
        n_steps = int(np.ceil(N/p.minibatch_size))
        dl_test_data = {inp: np.resize(inputs_test, (p.minibatch_size, n_steps, dimensions)),
                        p_out: np.resize(targets_test, (p.minibatch_size, n_steps, 2))}
        with nengo_dl.Simulator(model, minibatch_size=p.minibatch_size) as sim:
            #loss_pre = sim.loss(dl_test_data)

            if p.n_epochs > 0:
                sim.train(dl_train_data, tf.train.RMSPropOptimizer(learning_rate=p.learning_rate),
                          n_epochs=p.n_epochs)

            loss_post = sim.loss(dl_test_data)

            sim.run_steps(n_steps, data=dl_test_data)

        data = sim.data[p_out].reshape(-1,2)[:len(targets_test)]
        
        rmse_test = np.sqrt(np.mean((targets_test-data)**2, axis=0))*p.merge          
        if plt:
            plt.plot(data*p.merge)
            plt.plot(targets_test*p.merge, ls='--')
            
        return dict(
            rmse_test = rmse_test,
            max_n_neurons = max([ens.n_neurons for ens in model.all_ensembles]),
            test_targets = targets_test,
            test_output = data,
            test_loss = loss_post
            )
Example #25
                                       seed=seed + i)
    control_model_pes = LearningModel(neurons,
                                      dimensions,
                                      PES(),
                                      function_to_learn,
                                      convolve=convolve,
                                      seed=seed + i)
    control_model_nef = LearningModel(neurons,
                                      dimensions,
                                      None,
                                      function_to_learn,
                                      convolve=convolve,
                                      seed=seed + i)

    print("Iteration", i)
    with nengo_dl.Simulator(learned_model_mpes, device=device) as sim_mpes:
        print("Learning network (mPES)")
        sim_mpes.run(sim_time)
    with nengo_dl.Simulator(control_model_pes, device=device) as sim_pes:
        print("Control network (PES)")
        sim_pes.run(sim_time)
    with nengo_dl.Simulator(control_model_nef, device=device) as sim_nef:
        print("Control network (NEF)")
        sim_nef.run(sim_time)

    # essential statistics
    num_blocks = int(sim_time / learn_block_time)
    num_testing_blocks = int(num_blocks / 2)
    for sim, mod, lst in zip([sim_mpes, sim_pes, sim_nef], [
            learned_model_mpes, control_model_pes, control_model_nef
    ], [errors_iterations_mpes, errors_iterations_pes, errors_iterations_nef]):
Example #26
train_data_out_categorical = train_data_out_categorical.reshape(-1, 1, 2)
valid_data_out = valid_data_out.reshape(-1, 1, 1)

train_data = {inp: train_data, out_p: train_data_out}
#train_data = {inp: train_data, out_p: train_data_out_categorical}

# for the test data evaluation we'll be running the network over time
# using spiking neurons, so we need to repeat the input/target data
# for a number of timesteps (based on the presentation_time)
test_data = {
    inp: np.tile(valid_data, (1, int(presentation_time / dt), 1)),
    out_p_filt: np.tile(valid_data_out, (1, int(presentation_time / dt), 1))
}

do_training = True
with nengo_dl.Simulator(net, minibatch_size=minibatch_size, seed=0) as sim:
    if do_training:
        sim.compile(loss={out_p_filt: classification_accuracy})
        print("accuracy before training: %.2f%%" %
                sim.evaluate(test_data[inp], {out_p_filt: test_data[out_p_filt]}, verbose=0)["loss"])

        # run training
        sim.compile(
                optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.001),
                loss={out_p: tf.losses.SparseCategoricalCrossentropy(from_logits=True)}
                #loss = {out_p: tf.compat.v1.nn.softmax_cross_entropy_with_logits_v2}
        )
        sim.fit(train_data[inp], {out_p: train_data[out_p]}, epochs=epoch)

        sim.compile(loss={out_p_filt: classification_accuracy})
        print("accuracy after training: %.2f%%" %
Example #27
    def evaluate(self, p, plt):
        files = []
        sets = []
        for f in os.listdir(p.dataset_dir):
            if f.endswith('events'):
                files.append(os.path.join(p.dataset_dir, f))

        if p.test_set == 'one':
            test_file = random.sample(files, 1)[0]
            files.remove(test_file)

        if p.n_data != -1:
            files = random.sample(files, p.n_data)

        if len(p.load_params_from) > 0:
            params = np.load(p.load_params_from, allow_pickle=True)
        else:
            params = None

        strip_edges = 3  #  the number of edge pixels to remove due to convolution

        inputs = []
        targets = []
        targets_raw = []
        for f in files:
            times, imgs, targs = davis_tracking.load_data(
                f,
                dt=p.dt,
                decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation,
                merge=p.merge)
            inputs.append(imgs)
            targets_raw.append(targs[:, :2])
            targets.append(
                davis_tracking.make_heatmap(targs,
                                            merge=p.merge,
                                            strip_edges=strip_edges).reshape(
                                                len(targs), -1))

        inputs_all = np.vstack(inputs)
        targets_all = np.vstack(targets)
        targets_all_raw = np.vstack(targets_raw)

        if p.test_set == 'odd':
            inputs_train = inputs_all[::2]
            inputs_test = inputs_all[1::2]
            targets_train = targets_all[::2]
            targets_test = targets_all[1::2]
            targets_test_raw = targets_all_raw[1::2]
            dt_test = p.dt * 2
        elif p.test_set == 'one':
            times, imgs, targs = davis_tracking.load_data(
                test_file,
                dt=p.dt_test,
                decay_time=p.decay_time,
                separate_channels=p.separate_channels,
                saturation=p.saturation,
                merge=p.merge)
            inputs_test = imgs

            targets_test_raw = targs
            targets_test = davis_tracking.make_heatmap(
                targs, merge=p.merge,
                strip_edges=strip_edges).reshape(len(targs), -1)
            inputs_train = inputs_all
            targets_train = targets_all
            dt_test = p.dt_test

        if p.separate_channels:
            shape = (2, 180 // p.merge, 240 // p.merge)
        else:
            shape = (1, 180 // p.merge, 240 // p.merge)
        output_shape = shape[1] - strip_edges * 2, shape[2] - strip_edges * 2

        dimensions = shape[0] * shape[1] * shape[2]

        if p.normalize:
            magnitude = np.linalg.norm(inputs_train.reshape(-1, dimensions),
                                       axis=1)
            inputs_train = inputs_train * (1.0 / magnitude[:, None, None])

            magnitude = np.linalg.norm(inputs_test.reshape(-1, dimensions),
                                       axis=1)
            inputs_test = inputs_test * (1.0 / magnitude[:, None, None])

        max_rate = 100
        amp = 1 / max_rate

        model = nengo.Network()
        with model:
            model.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear(
                amplitude=amp)
            model.config[nengo.Ensemble].max_rates = nengo.dists.Choice(
                [max_rate])
            model.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
            model.config[nengo.Connection].synapse = None

            inp = nengo.Node(
                nengo.processes.PresentInput(
                    inputs_test.reshape(-1, dimensions), dt_test),
                size_out=dimensions,
            )

            out = nengo.Node(None, size_in=targets_train.shape[-1])

            if not p.split_spatial:
                # do a standard convnet
                init = params[2][
                    'transform'].init if params is not None else nengo.dists.Uniform(
                        -1, 1)
                conv1 = nengo.Convolution(p.n_features_1,
                                          shape,
                                          channels_last=False,
                                          strides=(1, 1),
                                          padding='valid',
                                          kernel_size=(3, 3),
                                          init=init)
                layer1 = nengo.Ensemble(conv1.output_shape.size, dimensions=1)
                if params is not None:
                    layer1.gain = params[0]['gain']
                    layer1.bias = params[0]['bias']
                nengo.Connection(inp, layer1.neurons, transform=conv1)

                init = params[3][
                    'transform'].init if params is not None else nengo.dists.Uniform(
                        -1, 1)
                conv2 = nengo.Convolution(p.n_features_2,
                                          conv1.output_shape,
                                          channels_last=False,
                                          strides=(1, 1),
                                          padding='valid',
                                          kernel_size=(3, 3),
                                          init=init)
                layer2 = nengo.Ensemble(conv2.output_shape.size, dimensions=1)
                if params is not None:
                    layer2.gain = params[1]['gain']
                    layer2.bias = params[1]['bias']
                nengo.Connection(layer1.neurons,
                                 layer2.neurons,
                                 transform=conv2)

                init = params[4][
                    'transform'].init if params is not None else nengo.dists.Uniform(
                        -1, 1)
                conv3 = nengo.Convolution(1,
                                          conv2.output_shape,
                                          channels_last=False,
                                          strides=(1, 1),
                                          padding='valid',
                                          kernel_size=(3, 3),
                                          init=init)

                nengo.Connection(layer2.neurons, out, transform=conv3)
            else:
                # do the weird spatially split convnet
                convnet = davis_tracking.ConvNet(nengo.Network())
                convnet.make_input_layer(shape,
                                         spatial_stride=(p.spatial_stride,
                                                         p.spatial_stride),
                                         spatial_size=(p.spatial_size,
                                                       p.spatial_size))
                nengo.Connection(inp, convnet.input)
                init = params[2][
                    'transform'].init if params is not None else nengo.dists.Uniform(
                        -1, 1)
                convnet.make_middle_layer(n_features=p.n_features_1,
                                          n_parallel=p.n_parallel,
                                          n_local=1,
                                          kernel_stride=(1, 1),
                                          kernel_size=(3, 3),
                                          init=init)
                init = params[3][
                    'transform'].init if params is not None else nengo.dists.Uniform(
                        -1, 1)
                convnet.make_middle_layer(n_features=p.n_features_2,
                                          n_parallel=p.n_parallel,
                                          n_local=1,
                                          kernel_stride=(1, 1),
                                          kernel_size=(3, 3),
                                          init=init)
                init = params[4][
                    'transform'].init if params is not None else nengo.dists.Uniform(
                        -1, 1)
                convnet.make_middle_layer(n_features=1,
                                          n_parallel=p.n_parallel,
                                          n_local=1,
                                          kernel_stride=(1, 1),
                                          kernel_size=(3, 3),
                                          init=init,
                                          use_neurons=False)
                convnet.make_merged_output(output_shape)
                nengo.Connection(convnet.output, out)

                if params is not None:
                    assert np.allclose(params[0]['gain'], 100, atol=1e-5)
                    assert np.allclose(params[1]['gain'], 100, atol=1e-5)
                    if np.max(np.abs(params[0]['bias'])) > 1e-8:
                        print(
                            'WARNING: biases are not yet being set on the neurons'
                        )
                    if np.max(np.abs(params[1]['bias'])) > 1e-8:
                        print(
                            'WARNING: biases are not yet being set on the neurons'
                        )
                    #assert np.allclose(params[0]['bias'], 0, atol=1e-4)
                    #assert np.allclose(params[1]['bias'], 0, atol=1e-4)
                    #TODO: actually do this!  Even though it involves annoying slicing

            p_out = nengo.Probe(out)

        N = len(inputs_train)
        n_steps = int(np.ceil(N / p.minibatch_size))
        dl_train_data = {
            inp:
            np.resize(inputs_train, (p.minibatch_size, n_steps, dimensions)),
            p_out:
            np.resize(targets_train,
                      (p.minibatch_size, n_steps, targets_train.shape[-1]))
        }
        N = len(inputs_test)
        n_steps = int(np.ceil(N / p.minibatch_size))
        dl_test_data = {
            inp:
            np.resize(inputs_test, (p.minibatch_size, n_steps, dimensions)),
            p_out:
            np.resize(targets_test,
                      (p.minibatch_size, n_steps, targets_train.shape[-1]))
        }
        with nengo_dl.Simulator(model, minibatch_size=p.minibatch_size) as sim:
            #loss_pre = sim.loss(dl_test_data)

            if p.n_epochs > 0:
                sim.train(
                    dl_train_data,
                    tf.train.RMSPropOptimizer(learning_rate=p.learning_rate),
                    n_epochs=p.n_epochs)

            loss_post = sim.loss(dl_test_data)

            sim.run_steps(n_steps, data=dl_test_data)

            if p.save_params:
                assert not p.split_spatial

                objects = list(model.all_ensembles) + list(
                    model.all_connections)
                params = sim.get_nengo_params(objects, as_dict=False)

                np.save(
                    os.path.join(p.data_dir, p.data_filename + '.params.npy'),
                    params)

        data = sim.data[p_out].reshape(
            -1, targets_train.shape[-1])[:len(targets_test)]

        data_peak = np.array(
            [davis_tracking.find_peak(d.reshape(output_shape)) for d in data])
        target_peak = np.array([
            davis_tracking.find_peak(d.reshape(output_shape))
            for d in targets_test
        ])

        rmse_test = np.sqrt(np.mean(
            (target_peak - data_peak)**2, axis=0)) * p.merge
        if plt:
            plt.plot(data_peak * p.merge)
            plt.plot(target_peak * p.merge, ls='--')
            plt.plot((targets_test_raw - strip_edges) * p.merge, ls=':')

        return dict(
            rmse_test=rmse_test,
            max_n_neurons=max([ens.n_neurons for ens in model.all_ensembles]),
            #test_targets = targets_test,
            test_targets_raw=targets_test_raw,
            #test_output = data,
            target_peak=target_peak,
            data_peak=data_peak,
            test_loss=loss_post,
        )
Example #28
train_data_out = train_data_out.reshape(-1, 1, 1)
valid_data_out = valid_data_out.reshape(-1, 1, 1)

train_data = {inp: train_data, out_p: train_data_out}

# for the test data evaluation we'll be running the network over time
# using spiking neurons, so we need to repeat the input/target data
# for a number of timesteps (based on the presentation_time)
test_data = {
    inp: np.tile(valid_data, (1, int(presentation_time / dt), 1)),
    out_p_filt: np.tile(valid_data_out, (1, int(presentation_time / dt), 1))
}

do_training = True
with nengo_dl.Simulator(net,
                        minibatch_size=minibatch_size,
                        seed=0,
                        progress_bar=False) as sim:
    if do_training:
        sim.compile(loss={out_p_filt: classification_accuracy})
        print("accuracy before training: %.2f%%" %
              sim.evaluate(test_data[inp], {out_p_filt: test_data[out_p_filt]},
                           verbose=0)["loss"])

        # run training
        sim.compile(
            optimizer=tf.keras.optimizers.RMSprop(learning_rate=0.001),
            loss={
                out_p:
                tf.losses.SparseCategoricalCrossentropy(from_logits=True)
            })
        sim.fit(train_data[inp], train_data[out_p], epochs=epoch)
Example #29
                                         sample_every=sample_every)

# Create the Simulator and run it
printlv2(f"Backend is {backend}, running on ", end="")
if backend == "nengo_core":
    printlv2("CPU")
    cm = nengo.Simulator(model,
                         seed=seed,
                         dt=timestep,
                         optimize=optimize,
                         progress_bar=progress_bar)
if backend == "nengo_dl":
    printlv2(device)
    cm = nengo_dl.Simulator(model,
                            seed=seed,
                            dt=timestep,
                            progress_bar=progress_bar,
                            device=device)
start_time = time.time()
with cm as sim:
    for i in range(simulation_discretisation):
        printlv2(
            f"\nRunning discretised step {i + 1} of {simulation_discretisation}"
        )
        sim.run(sim_time / simulation_discretisation)
printlv2(
    f"\nTotal time for simulation: {time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time))}"
)

if probe > 0:
    # essential statistics
Example #30
def three():
    K.clear_session()
    data = []
    """
    labels = [['black', 'jeans']]*344 + [['blue', 'dress']]*386 + [['blue', 'jeans']]*356 + [['blue', 'shirt']]*369 + \
             [['blue', 'sweater']]*99 + [['gray', 'shorts']]*96 + [['red', 'dress']]*380 + [['red', 'shirt']]*332
    """
    EPOCHS = 25
    INIT_LR = 1e-3
    BS = 32
    IMAGE_DIMS = (96, 96, 3)

    checkpoints_dir = '.\\checkpoints'
    # load the image, pre-process it, and store it in the data list
    import glob
    image_types = ('*.jpg', '*.jpeg', '*.png')
    files = []
    labels = []
    folders = glob.glob(
        "D:\\Users\\bob\\PycharmProjects\\test_recommendation\\nengo_classification\\dataset\\*\\"
    )
    for image_type in image_types:
        files.extend(
            glob.glob(
                "D:\\Users\\bob\\PycharmProjects\\test_recommendation\\nengo_classification\\dataset\\*\\"
                + image_type))
    print(len(files))
    for f in files:
        """
        image = cv2.imread(f)
        image = cv2.resize(image, (IMAGE_DIMS[1], IMAGE_DIMS[0]))
        image = img_to_array(image)
        data.append(image)
        """
        label = f.split("\\")[-2].split("_")
        labels.append(label)

    # scale the raw pixel intensities to the range [0, 1]
    data = np.load("data.npy")
    labels = np.array(labels)

    # print("[INFO] data matrix: {} images ({:.2f}MB)".format(len(imagePaths), data.nbytes / (1024 * 1000.0)))

    # binarize the labels using scikit-learn's special multi-label
    # binarizer implementation
    # print("[INFO] class labels:")
    mlb = MultiLabelBinarizer()
    labels = mlb.fit_transform(labels)

    #print(labels)
    """
    # loop over each of the possible class labels and show them
    for (i, label) in enumerate(mlb.classes_):
        print("{}. {}".format(i + 1, label))

    print(data.shape)
    print(labels.shape)
    # partition the data into training and testing splits using 80% of
    # the data for training and the remaining 20% for testing
    (trainX, testX, trainY, testY) = train_test_split(data,
                                                      labels, test_size=0.2, random_state=42)

    # construct the image generator for data augmentation
    aug = ImageDataGenerator(rotation_range=25, width_shift_range=0.1,
                             height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
                             horizontal_flip=True, fill_mode="nearest")

    # initialize the model using a sigmoid activation as the final layer
    # in the network so we can perform multi-label classification
    print("[INFO] compiling model...")
    model = SmallerVGGNet.build(
        width=IMAGE_DIMS[1], height=IMAGE_DIMS[0],
        depth=IMAGE_DIMS[2], classes=len(mlb.classes_),
        finalAct="sigmoid")

    # initialize the optimizer (SGD is sufficient)
    opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)

    # compile the model using binary cross-entropy rather than
    # categorical cross-entropy -- this may seem counterintuitive for
    # multi-label classification, but keep in mind that the goal here
    # is to treat each output label as an independent Bernoulli
    # distribution
    model.compile(loss="binary_crossentropy", optimizer=opt,
                  metrics=["accuracy"])

    print("[INFO] training network...")
    H = model.fit_generator(
        aug.flow(trainX, trainY, batch_size=BS),
        validation_data=(testX, testY),
        steps_per_epoch=len(trainX) // BS,
        epochs=EPOCHS, verbose=1)

    # save the model to disk
    print("[INFO] serializing network...")
    model.save("fashion_model.h5")
    model.save_weights("fashion_model_weights.h5")

    plt.style.use("ggplot")
    plt.figure()
    N = EPOCHS
    plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
    plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend(loc="upper left")
    plt.savefig("plot.png")
    """
    class KerasNode:
        def __init__(self, keras_model, mlb):
            self.model = keras_model
            self.mlb = mlb

        def pre_build(self, *args):
            self.model = clone_model(self.model)

        def __call__(self, t, x):
            # pre-process the image for classification
            img = tf.reshape(x, (-1, ) + IMAGE_DIMS)
            #print(img.shape)
            """
            img = cv2.resize(img, (96, 96))
            img = img.astype("float") / 255.0
            img = img_to_array(img)
            img = np.expand_dims(img, axis=0)
            """
            return self.model.call(img)
            # return self.model.call(tf.convert_to_tensor(img, dtype=tf.float32))

        def post_build(self, sess, rng):
            with sess.as_default():
                self.model.load_weights("fashion_model_weights.h5")
                self.mlb = pickle.loads(open("mlb.pickle", "rb").read())
            #pass

    net_input_shape = np.prod((96, 96, 3))  # because input will be a vector

    with nengo.Network() as net:
        # create a normal input node to feed in our test image.
        # the `np.ones` array is a placeholder, these
        # values will be replaced with the Fashion MNIST images
        # when we run the Simulator.
        input_node = nengo.Node(output=np.ones((net_input_shape, )))

        # create a TensorNode containing the KerasNode we defined
        # above, passing it the Keras model we created.
        # we also need to specify size_in (the dimensionality of
        # our input vectors, the flattened images) and size_out (the number
        # of classification classes output by the keras network)
        model = load_model("fashion_model.h5")
        mlb = pickle.loads(open("mlb.pickle", "rb").read())
        keras_node = nengo_dl.TensorNode(KerasNode(model, mlb),
                                         size_in=net_input_shape,
                                         size_out=len(mlb.classes_))

        # connect up our input to our keras node
        nengo.Connection(input_node, keras_node, synapse=None)

        # add probes to collect the output of the keras node and the input node
        keras_p = nengo.Probe(keras_node)
        input_p = nengo.Probe(input_node)

    minibatch_size = 20

    np.random.seed(3)
    test_inds = np.random.randint(low=0,
                                  high=data.shape[0],
                                  size=(minibatch_size, ))
    test_inputs = data[test_inds]

    # flatten images so we can pass them as vectors to the input node
    test_inputs = test_inputs.reshape((-1, net_input_shape))

    # unlike in Keras, NengoDL simulations always run over time.
    # so we need to add the time dimension to our data (even though
    # in this case we'll just run for a single timestep).
    test_inputs = test_inputs[:, None, :]

    with nengo_dl.Simulator(net, minibatch_size=len(test_inputs)) as sim:
        sim.step(data={input_node: test_inputs})

    tensornode_output = sim.data[keras_p]

    for i in range(len(test_inputs)):
        plt.figure()
        b, g, r = cv2.split(data[test_inds[i]])
        rgb_img = cv2.merge([r, g, b])
        plt.imshow(rgb_img)
        print("[INFO] classifying ...")
        proba = tensornode_output[i][0]
        print(proba)
        idxs = np.argsort(proba)[::-1]
        print(idxs)
        print(np.argmax(tensornode_output[i, 0]))
        plt.axis("off")
        plt.title("%s, %s" % (mlb.classes_[idxs[0]], mlb.classes_[idxs[1]]))
        plt.show()