def test_neuron_update_fwd(self):
    """Forward dense neuron update matches a hand-computed sigmoid pass.

    Runs blur.dense_neuron_update in FORWARD mode on a random dense state
    and checks that:
      * the presynaptic state is returned unchanged,
      * postsynaptic channel 0 equals sigmoid(inp_with_bias @ w),
      * postsynaptic channel 1 equals d_sigmoid(out + inp_with_bias @ w).
    """
    tf.reset_default_graph()
    tf.disable_v2_behavior()
    n_in, n_out = 10, 5
    inp, out, w = random_dense(n_in, n_out)
    env = blur_env.tf_env
    pre, post, synapse, genome, network_spec = get_blur_state(env, inp, out, w)
    pre_fwd, post_fwd = blur.dense_neuron_update(
        pre,
        post,
        synapse,
        inp_act=None,
        out_act=sigmoid_with_grad,
        neuron_genome=genome.neuron,
        update_type=synapse_util.UpdateType.FORWARD,
        global_spec=network_spec,
        env=env)

    inp = tf.constant(inp.astype(blur_env.NP_FLOATING_TYPE))
    ww = w.astype(blur_env.NP_FLOATING_TYPE)
    # Append the constant bias unit along the last axis before the matmul.
    inp_with_bias = tf.concat([inp[Ellipsis, 0], [[[[1]]]]], axis=-1)
    exp_results = inp_with_bias @ ww

    with tf.Session() as s:
        # tf.initialize_all_variables() is deprecated in compat.v1;
        # global_variables_initializer() is the supported equivalent.
        s.run(tf.global_variables_initializer())
        self.assertAllClose(pre_fwd, pre)
        self.assertAllClose(post_fwd[Ellipsis, 0], tf.math.sigmoid(exp_results))
        self.assertAllClose(post_fwd[Ellipsis, 1],
                            d_sigmoid(out[Ellipsis, 0] + exp_results))
# Example #2
    def test_neuron_update_bwd(self):
        """Backward dense neuron update matches a hand-computed backprop step.

        Runs a FORWARD update first, then a BACKWARD update, and checks that:
          * presynaptic channel 0 (activations) is unchanged,
          * presynaptic channel 1 equals the gradient-like term
            (post_fwd[..., 1] @ w^T), with the bias column dropped, gated by
            pre_fwd[..., 1].
        """
        tf.reset_default_graph()
        tf.disable_v2_behavior()
        n_in, n_out = 10, 5
        inp, out, w = random_dense(n_in, n_out)
        env = blur_env.tf_env
        pre, post, synapse, genome, network_spec = get_blur_state(
            env, inp, out, w)

        inp_act_fn = lambda x: x
        out_act_fn = sigmoid_with_grad

        pre_fwd, post_fwd = blur.dense_neuron_update(
            pre,
            post,
            synapse,
            inp_act=None,
            out_act=out_act_fn,
            neuron_genome=genome.neuron,
            update_type=synapse_util.UpdateType.FORWARD,
            global_spec=network_spec,
            env=env)

        pre_bkw, _ = blur.dense_neuron_update(
            pre_fwd,
            post_fwd,
            synapse=synapse,
            inp_act=inp_act_fn,
            out_act=out_act_fn,
            neuron_genome=genome.neuron,
            update_type=synapse_util.UpdateType.BACKWARD,
            global_spec=network_spec,
            env=env)

        ww = w.astype(blur_env.NP_FLOATING_TYPE)

        # Backward pass through the weights: multiply by w transposed on the
        # last two axes (leading axes are batch-like).
        exp_result = post_fwd[Ellipsis, 1] @ tf.transpose(ww, (0, 1, 3, 2))

        with tf.Session() as s:
            # tf.initialize_all_variables() is deprecated in compat.v1;
            # global_variables_initializer() is the supported equivalent.
            s.run(tf.global_variables_initializer())
            self.assertAllClose(pre_bkw[Ellipsis, 0], pre_fwd[Ellipsis, 0])
            # [..., :-1] drops the bias column before comparing.
            self.assertAllClose(
                pre_bkw[Ellipsis, 1],
                exp_result[Ellipsis, :-1] * pre_fwd[Ellipsis, 1])
# Example #3
    def test_synapse_derivative(self):
        """Hebbian update equals the true gradient of sigmoid(x @ w) wrt w.

        Zeros out the postsynaptic state, runs a FORWARD neuron update, forms
        the Hebbian update, and checks that its forward submatrix (channel 0)
        matches tf.gradients of sigmoid(inp_with_bias @ w) with respect to w.
        """
        tf.reset_default_graph()
        tf.disable_v2_behavior()
        n_in, n_out = 10, 5
        inp, out, w = random_dense(n_in, n_out)

        env = blur_env.tf_env
        pre, post, synapse, genome, network_spec = get_blur_state(
            env, inp, out, w)
        # Start from a zeroed two-channel postsynaptic state so the Hebbian
        # term reflects only this forward pass.
        post = np.concatenate(2 * [np.zeros_like(out)],
                              axis=-1).astype(blur_env.NP_FLOATING_TYPE)

        pre_fwd, post_fwd = blur.dense_neuron_update(
            pre,
            post,
            synapse,
            inp_act=None,
            out_act=sigmoid_with_grad,
            neuron_genome=genome.neuron,
            update_type=synapse_util.UpdateType.FORWARD,
            global_spec=network_spec,
            env=env)

        hebbian_update = blur.get_hebbian_update(pre_fwd, post_fwd,
                                                 genome.synapse.transform,
                                                 network_spec, env)

        hebbian_update_submatrix = synapse_util.synapse_submatrix(
            hebbian_update, n_in, synapse_util.UpdateType.FORWARD)

        # Reference: autodiff gradient of the same forward computation.
        inp = tf.constant(inp.astype(blur_env.NP_FLOATING_TYPE))
        inp_with_bias = tf.concat([inp[Ellipsis, 0], [[[[1]]]]], axis=-1)
        ww = tf.constant(w.astype(blur_env.NP_FLOATING_TYPE))
        out = tf.nn.sigmoid(inp_with_bias @ ww)
        grad_w = tf.gradients(out, ww)

        with tf.Session() as s:
            # tf.initialize_all_variables() is deprecated in compat.v1;
            # global_variables_initializer() is the supported equivalent.
            s.run(tf.global_variables_initializer())
            hebb_update, grad_w_val = s.run(
                [hebbian_update_submatrix[Ellipsis, 0], grad_w[0]])
        self.assertAllClose(hebb_update, grad_w_val)