Example #1
def test_train(tf_graph, mocker, capfd):
    pd = PerceptualDiscrimination(dt=10, tau=100, T=2000, N_batch=128)
    gen = pd.batch_generator()

    params = get_params()
    rnn = RNN(params)

    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))

    pd1 = PerceptualDiscrimination(dt=params['dt'],
                                   tau=params['tau'],
                                   T=2000,
                                   N_batch=params['N_batch'])
    gen1 = pd1.batch_generator()
    assert rnn.is_initialized is False
    assert rnn.is_built is False
    rnn.train(gen1)
    assert rnn.is_initialized is True
    assert rnn.is_built is True
    out, _ = capfd.readouterr()
    assert out != ""
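These excerpts all rely on a shared test scaffold that is not shown (the tf_graph fixture, get_params, and the imports of RNN, the task classes, mean_squared_error, and ceil). A minimal sketch of what that scaffold might look like is given below; the fixture body, parameter values, and key names are assumptions for illustration, matching the keys checked in test_minimal_info_rnn further down, not the project's actual conftest.

import pytest
import tensorflow as tf


@pytest.fixture()
def tf_graph():
    # Give each test a clean TensorFlow graph and reset it afterwards.
    yield
    tf.compat.v1.reset_default_graph()


def get_params():
    # Minimal parameter dictionary; the required keys mirror test_minimal_info_rnn
    # below, and the values are illustrative.
    return {
        'name': 'test_rnn',
        'N_in': 2,
        'N_rec': 50,
        'N_out': 2,
        'N_steps': 200,
        'dt': 10,
        'tau': 100,
        'N_batch': 128,
    }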
Example #2
def test_save(tf_graph, mocker, tmpdir):
    save_weights_path = str(tmpdir.dirpath("save_weights.npz"))
    params = get_params()
    rnn = RNN(params)
    with pytest.raises(UserWarning) as excinfo:
        rnn.save(save_weights_path)
    assert "No weights to return" in str(excinfo.value)
    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))
    rnn.build()
    rdm1 = rd.RDM(dt=params['dt'],
                  tau=params['tau'],
                  T=2000,
                  N_batch=params['N_batch'])
    gen1 = rdm1.batch_generator()
    rnn.train(gen1)

    assert not tmpdir.dirpath("save_weights.npz").check(exists=1)
    rnn.save(save_weights_path)
    assert tmpdir.dirpath("save_weights.npz").check(exists=1)

    tmpdir.dirpath("save_weights.npz").remove()
Example #3
def test_custom_loss(tf_graph, mocker):

    params = get_params()
    params['loss_function'] = 'my_mean_squared_error'
    rnn = RNN(params)

    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))

    with pytest.raises(UserWarning) as excinfo:
        rnn.build()
    assert 'my_mean_squared_error' in str(excinfo.value)
    rnn.destruct()

    params['my_mean_squared_error'] = mean_squared_error
    rnn = RNN(params)

    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))

    rnn.build()
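For context, a masked mean squared error compatible with the usage above might look like the following sketch; the (predictions, y, output_mask) signature is an assumption inferred from these tests, not a documented API.

import tensorflow as tf


def mean_squared_error(predictions, y, output_mask):
    # Masked MSE: only entries where output_mask is 1 contribute to the loss.
    return tf.reduce_mean(tf.square(output_mask * (predictions - y)))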
Example #4
def test_get_weights(tf_graph, mocker):
    params = get_params()
    rnn = RNN(params)

    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))

    assert type(rnn.get_weights()) is dict
Example #5
def test_build(tf_graph, mocker):
    pd = PerceptualDiscrimination(dt=10, tau=100, T=2000, N_batch=128)
    gen = pd.batch_generator()

    params = get_params()
    rnn = RNN(params)
    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))
    rnn.build()
Example #6
def test_custom_regularization(tf_graph, mocker):

    params = get_params()
    params['custom_regularization'] = my_custom_regularization
    rnn = RNN(params)

    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))
    rnn.build()
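Similarly, a my_custom_regularization stub that would satisfy this test could return any scalar penalty; the (model, params) signature and the use of tf.compat.v1.trainable_variables() below are illustrative assumptions.

import tensorflow as tf


def my_custom_regularization(model, params):
    # Illustrative only: a small L2 penalty over all trainable variables.
    # The (model, params) signature and the tf.compat.v1.trainable_variables()
    # accessor are assumptions, not the library's documented interface.
    return 1e-4 * tf.add_n(
        [tf.reduce_sum(tf.square(v)) for v in tf.compat.v1.trainable_variables()])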
Example #7
def test_train(tf_graph, mocker, capfd):
    rdm = rd.RDM(dt=10, tau=100, T=2000, N_batch=128)
    gen = rdm.batch_generator()

    params = get_params()
    rnn = RNN(params)
    with pytest.raises(UserWarning) as excinfo:
        rnn.train(gen)
    assert 'build' in str(excinfo.value)

    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))
    rnn.build()

    rdm1 = rd.RDM(dt=params['dt'],
                  tau=params['tau'],
                  T=2000,
                  N_batch=params['N_batch'])
    gen1 = rdm1.batch_generator()
    assert rnn.is_initialized is False
    rnn.train(gen1)
    assert rnn.is_initialized is True
    out, _ = capfd.readouterr()
    assert out != ""
Example #8
def test_minimal_info_rnn(tf_graph):
    params = get_params()
    del params['name']
    with pytest.raises(KeyError) as excinfo:
        RNN(params)
    assert 'name' in str(excinfo.value)

    params = get_params()
    del params['N_in']
    with pytest.raises(KeyError) as excinfo:
        RNN(params)
    assert 'N_in' in str(excinfo.value)

    params = get_params()
    del params['N_rec']
    with pytest.raises(KeyError) as excinfo:
        RNN(params)
    assert 'N_rec' in str(excinfo.value)

    params = get_params()
    del params['N_out']
    with pytest.raises(KeyError) as excinfo:
        RNN(params)
    assert 'N_out' in str(excinfo.value)

    params = get_params()
    del params['N_steps']
    with pytest.raises(KeyError) as excinfo:
        RNN(params)
    assert 'N_steps' in str(excinfo.value)

    params = get_params()
    del params['dt']
    with pytest.raises(KeyError) as excinfo:
        RNN(params)
    assert 'dt' in str(excinfo.value)

    params = get_params()
    del params['tau']
    with pytest.raises(KeyError) as excinfo:
        RNN(params)
    assert 'tau' in str(excinfo.value)

    params = get_params()
    del params['N_batch']
    with pytest.raises(KeyError) as excinfo:
        RNN(params)
    assert 'N_batch' in str(excinfo.value)

    # Test that the RNN works if only the minimal info is given.
    params = get_params()
    RNN(params)
Example #9
def test_initializer_rnn(tf_graph):
    params = get_params()
    params = extend_params(params)
    params['initializer'] = GaussianSpectralRadius(N_in=params['N_in'],
                                                   N_rec=params['N_rec'],
                                                   N_out=params['N_out'],
                                                   autapses=True,
                                                   spec_rad=1.1)
    RNN(params)
Example #10
def test_train_iters(tf_graph, mocker, capfd):
    params = get_params()

    pd1 = PerceptualDiscrimination(dt=params['dt'],
                                   tau=params['tau'],
                                   T=2000,
                                   N_batch=params['N_batch'])
    gen1 = pd1.batch_generator()

    for remainder in [0, 1]:
        train_params = {}
        # Number of iterations to train for. Default: 10000
        train_params['training_iters'] = params['N_batch'] + remainder
        # Compute and record loss every 'loss_epoch' epochs. Default: 10
        train_params['loss_epoch'] = 1
        train_params['verbosity'] = True

        rnn = RNN(params)
        mocker.patch.object(RNN, 'forward_pass')
        RNN.forward_pass.return_value = tf.fill(
            [params['N_batch'], params['N_steps'], params['N_out']],
            float('nan')), tf.fill(
                [params['N_batch'], params['N_steps'], params['N_rec']],
                float('nan'))

        rnn.train(gen1, train_params)
        rnn.destruct()

        out, _ = capfd.readouterr()
        N_epochs = int(
            ceil(train_params['training_iters'] / (params['N_batch'] * 1.0)))
        assert "Iter " + str(N_epochs * params['N_batch']) in out
        assert "Iter " + str((N_epochs + 1) * params['N_batch']) not in out
Example #11
def test_test(mocker):
    rdm = rd.RDM(dt=10, tau=100, T=2000, N_batch=128)
    gen = rdm.batch_generator()
    x, y, m, p = next(gen)

    params = get_params()
    rnn = RNN(params)
    with pytest.raises(UserWarning) as excinfo:
        rnn.test(x)
    assert 'build' in str(excinfo.value)

    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))
    rnn.build()

    rnn.test(x)
Example #12
def test_save(tf_graph, mocker, tmpdir):
    save_weights_path = str(tmpdir.dirpath("save_weights.npz"))
    params = get_params()
    rnn = RNN(params)

    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))

    pd1 = PerceptualDiscrimination(dt=params['dt'],
                                   tau=params['tau'],
                                   T=2000,
                                   N_batch=params['N_batch'])
    gen1 = pd1.batch_generator()
    rnn.train(gen1)

    assert not tmpdir.dirpath("save_weights.npz").check(exists=1)
    rnn.save(save_weights_path)
    assert tmpdir.dirpath("save_weights.npz").check(exists=1)

    tmpdir.dirpath("save_weights.npz").remove()
Example #13
def test_train_train_params_file_creation(tf_graph, mocker, tmpdir, capfd):
    params = get_params()

    pd1 = PerceptualDiscrimination(dt=params['dt'],
                                   tau=params['tau'],
                                   T=2000,
                                   N_batch=params['N_batch'])
    gen1 = pd1.batch_generator()
    pd2 = PerceptualDiscrimination(dt=params['dt'],
                                   tau=params['tau'],
                                   T=1000,
                                   N_batch=params['N_batch'])
    gen2 = pd2.batch_generator()

    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))

    rnn = RNN(params)
    rnn.build()

    train_params = {}
    # Where to save the model after training. Default: None
    train_params['save_weights_path'] = str(tmpdir.dirpath("save_weights.npz"))
    # Number of iterations to train for. Default: 10000
    train_params['training_iters'] = 1000
    # Sets the learning rate if using the default optimizer. Default: .001
    train_params['learning_rate'] = .01
    # Compute and record loss every 'loss_epoch' epochs. Default: 10
    train_params['loss_epoch'] = 20
    train_params['verbosity'] = False
    # Save training weights every 'save_training_weights_epoch' epochs. Default: 100
    train_params['save_training_weights_epoch'] = 10
    # Where to save training weights as training progresses. Default: None
    train_params['training_weights_path'] = str(tmpdir.dirpath("training_weights"))
    # Replaces trial_batch_generator with generator_function when not None. Default: None
    train_params['generator_function'] = gen2
    # What optimizer to use to compute gradients. Default: tf.train.AdamOptimizer(learning_rate=train_params['learning_rate'])
    train_params['optimizer'] = tf.compat.v1.train.AdamOptimizer(
        learning_rate=train_params['learning_rate'])
    # If True, clip gradients by norm 1. Default: True
    train_params['clip_grads'] = False

    assert not tmpdir.dirpath("save_weights.npz").check(exists=1)
    assert not tmpdir.dirpath("training_weights" + str(train_params['save_training_weights_epoch'])).check(exists=1)
    rnn.train(gen1, train_params)

    assert rnn.is_initialized is True
    out, _ = capfd.readouterr()
    print(out)
    assert out == ""
    assert tmpdir.dirpath("save_weights.npz").check(exists=1)
    assert tmpdir.dirpath("training_weights" + str(train_params['save_training_weights_epoch']) + ".npz").check(exists=1)
Example #14
def test_forward_pass(tf_graph):
    params = get_params()
    rnn = RNN(params)
    with pytest.raises(UserWarning) as excinfo:
        rnn.forward_pass()
    assert 'forward_pass' in str(excinfo.value)
Example #15
def test_destruct(tf_graph, mocker):
    params = get_params()
    rnn1 = RNN(params)
    rnn1.destruct()
    rnn2 = RNN(params)
    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))
    rnn2.build()
    rnn2.destruct()
    rnn3 = RNN(params)
Example #16
def test_load_weights_path_rnn(tf_graph, mocker, tmpdir, capfd):
    params = get_params()

    pd1 = PerceptualDiscrimination(dt=params['dt'],
                                   tau=params['tau'],
                                   T=2000,
                                   N_batch=params['N_batch'])
    gen1 = pd1.batch_generator()

    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))

    rnn = RNN(params)
    rnn.build()

    train_params = {}
    train_params['save_weights_path'] = str(
        tmpdir.dirpath("save_weights.npz")
    )  # Where to save the model after training. Default: None
    train_params['verbosity'] = False

    ### Save out some weights to test with and destroy the RNN that created them
    assert not tmpdir.dirpath("save_weights.npz").check(exists=1)
    rnn.train(gen1, train_params)

    assert rnn.is_initialized is True
    out, _ = capfd.readouterr()
    print(out)
    assert out == ""
    assert tmpdir.dirpath("save_weights.npz").check(exists=1)
    rnn.destruct()

    ### Make sure loading weights fails on nonexistent file
    params['load_weights_path'] = "nonexistent"
    with pytest.raises(EnvironmentError) as excinfo:
        rnn = RNN(params)
    assert "No such file" in str(excinfo.value)
    rnn.destruct()

    ### Ensure success when loading weights created previously
    params['load_weights_path'] = str(tmpdir.dirpath("save_weights.npz"))
    rnn = RNN(params)

    tmpdir.dirpath("save_weights.npz").remove()
Example #17
def test_extra_info_rnn(tf_graph):
    params = get_params()
    params = extend_params(params)
    RNN(params)
Example #18
def test_recurrent_timestep(tf_graph):
    params = get_params()
    rnn = RNN(params)
    with pytest.raises(UserWarning) as excinfo:
        rnn.recurrent_timestep(1, 2)
    assert 'recurrent_timestep' in str(excinfo.value)
Example #19
def test_get_weights(tf_graph, mocker):
    params = get_params()
    rnn = RNN(params)
    with pytest.raises(UserWarning) as excinfo:
        rnn.get_weights()
    assert 'No weights to return yet -- model has not yet been initialized.' in str(
        excinfo.value)
    mocker.patch.object(RNN, 'forward_pass')
    RNN.forward_pass.return_value = tf.fill(
        [params['N_batch'], params['N_steps'], params['N_out']],
        float('nan')), tf.fill(
            [params['N_batch'], params['N_steps'], params['N_rec']],
            float('nan'))
    rnn.build()
    with pytest.raises(UserWarning) as excinfo:
        rnn.get_weights()
    assert 'No weights to return yet -- model has not yet been initialized.' in str(
        excinfo.value)
    rdm1 = rd.RDM(dt=params['dt'],
                  tau=params['tau'],
                  T=2000,
                  N_batch=params['N_batch'])
    gen1 = rdm1.batch_generator()
    rnn.train(gen1)
    assert type(rnn.get_weights()) is dict
Example #20
def test_output_timestep(tf_graph):
    params = get_params()
    rnn = RNN(params)
    with pytest.raises(UserWarning) as excinfo:
        rnn.output_timestep(1)
    assert 'output_timestep' in str(excinfo.value)