Example #1
def test_ais_init(nnet_type, betamode):
    """Check that AIS exposes the expected attributes and beta schedule,
    whether betas are given as a count (n_betas) or an explicit array."""
    n_runs = 5
    
    nnet = nnet_for_testing(nnet_type)
    dataset = Random('probability')
    data = B.get_value(dataset.train.data)

    if betamode == 'n_betas':
        n_betas = 100
        betas = None
    elif betamode == 'betas':
        n_betas = None
        n_betas_init = 25
        betas = np.linspace(0, 1, n_betas_init)
    else:
        raise NotImplementedError

    ais = estimators.AIS(nnet, data, n_runs, n_betas=n_betas, betas=betas)

    assert hasattr(ais, 'dbm_a')
    assert hasattr(ais, 'dbm_b')
    assert hasattr(ais, 'dbm_b_sampler')
    assert hasattr(ais, 'logZa')
    assert hasattr(ais, 'init_sample_ls')
    assert ais.n_runs == n_runs

    ais_betas = B.get_value(ais.betas)
    if betamode == 'n_betas':
        assert ais.n_betas == n_betas
        assert_allclose(ais_betas, np.linspace(0, 1, n_betas))
    elif betamode == 'betas':
        assert ais.n_betas == n_betas_init
        assert_allclose(ais_betas, betas)
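The nnet_type and betamode arguments suggest this test is driven by pytest parametrization. A minimal sketch of that setup, where the concrete parameter values are assumptions chosen from strings that appear elsewhere in these examples:

import pytest

# Hypothetical parametrization: 'rbm' and 'dbm' appear in Examples #12,
# #15, #16 and #18; the betamode strings come from the test body above.
@pytest.mark.parametrize('nnet_type', ['rbm', 'dbm'])
@pytest.mark.parametrize('betamode', ['n_betas', 'betas'])
def test_ais_init(nnet_type, betamode):
    ...  # body as in Example #1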
Example #2
def test_models(nnet_type):
    batch_size = 6
    n_epoch = 1

    data = random.Random('probability')

    nnet = nnet_for_testing(nnet_type)

    nnet = initializers.init_standard(nnet, data)
    optimizer = optimizers.SGD()
    trainer = training.CD(nnet, nb_pos_steps=2, nb_neg_steps=2)
    model = Model(nnet, optimizer, trainer)

    # test train_on_batch
    out = model.train_on_batch(data.train.data)
    assert out.size == 1

    # predict_on_batch
    out = model.predict_on_batch(data.valid.data)
    assert out.size == 1

    # test fit
    out = model.fit(data.train.data, n_epoch=n_epoch, batch_size=batch_size)
    assert isinstance(out, History)

    # test validation data
    out = model.fit(data.train.data, n_epoch=n_epoch, batch_size=batch_size,
                    validation_data=data.valid.data)
    assert isinstance(out, History)
Example #3
def test_Sampler_init(nnet_type, constant):
    beta = 1.0
    nnet = nnet_for_testing(nnet_type)

    constant_ls = []
    if constant is not None:
        constant_ls = [constant]

    sampler = samplers.Sampler(nnet)
    sampler.set_param(beta=beta, constant=constant_ls)

    assert sampler.beta == beta
    assert sampler.constant == constant_ls
Example #4
def test_training_loss(nnet_type, training_type):
    nb_pos_steps = 2
    nb_neg_steps = 2
    batch_size = 6

    nnet = nnet_for_testing(nnet_type)
    train = _init_training(training_type, nnet, nb_pos_steps, nb_neg_steps,
                           batch_size)
    inputs, data = _init_data(batch_size)

    loss = train.loss_fn()
    fn = B.function([inputs], loss(inputs), updates=train.updates)

    output = fn(data)
    assert output.size == 1
Example #5
def test_dbm_init(nnet_type):
    nnet = nnet_for_testing(nnet_type)

    layers = nnet.layers
    synapses = nnet.synapses
    parts = layers + synapses
    # collect every trainable parameter: one bias per layer, one weight
    # matrix per synapse
    weights = [lay.b for lay in layers] + [syn.W for syn in synapses]

    assert nnet.parts == parts
    assert nnet.trainable_weights == weights
    assert nnet.non_trainable_weights == []
Example #6
def test_ais_run(nnet_type):
    """Run AIS and check that the returned error bounds bracket the logZ
    estimate."""
    n_runs = 5
    n_betas = 5
    
    nnet = nnet_for_testing(nnet_type)
    dataset = Random('probability')
    data = B.get_value(dataset.train.data)

    ais = estimators.AIS(nnet, data, n_runs, n_betas=n_betas)

    ais.run_logZ()
    logZ_out, logZ_low_out, logZ_high_out = ais.estimate_log_error_Z()

    assert logZ_high_out >= logZ_out
    assert logZ_low_out <= logZ_out
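For context, run_logZ follows the standard annealed importance sampling scheme (Neal, 2001); the formulation below is textbook AIS, not taken from this library's documentation:

\hat{Z}_B / Z_A \approx \frac{1}{M} \sum_{m=1}^{M} \prod_{k=0}^{K-1} \frac{p^*_{\beta_{k+1}}\big(x_k^{(m)}\big)}{p^*_{\beta_k}\big(x_k^{(m)}\big)}

Here M corresponds to n_runs and the grid \beta_0 = 0 < \dots < \beta_K = 1 to betas; the logZa attribute from Example #1 is presumably the tractable log partition function of the base model dbm_a. estimate_log_error_Z returns the estimate together with lower and upper bounds, which is exactly what the two assertions check.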
Example #7
def test_training_init(nnet_type, training_type):
    nb_pos_steps = 2
    nb_neg_steps = 2
    batch_size = 6

    nnet = nnet_for_testing(nnet_type)
    train = _init_training(training_type, nnet, nb_pos_steps, nb_neg_steps,
                           batch_size)

    assert hasattr(train, 'pos_sampler')
    assert hasattr(train, 'neg_sampler')

    if nnet_type == 'rbm':
        # an RBM clamps the positive phase to a single step, presumably
        # because its posterior is exact after one update
        nb_pos_steps = 1

    assert train.nb_pos_steps == nb_pos_steps
    assert train.nb_neg_steps == nb_neg_steps
Example #8
def test_regularization_init(nnet_type, W_reg_type, b_reg_type):
    
    W_regularizer = None if W_reg_type is None else regularizers.get(W_reg_type)
    b_regularizer = None if b_reg_type is None else regularizers.get(b_reg_type)
    
    nnet = nnet_for_testing(nnet_type, W_regularizer, b_regularizer)
    for synapse in nnet.synapses:
        assert synapse.regularizer == W_regularizer
    for layer in nnet.layers:
        assert layer.regularizer == b_regularizer
Example #9
def test_regularization_fit(nnet_type):
    batch_size = 100
    n_epoch = 1
    W_reg_type = 'l1_l2'
    b_reg_type = 'l1_l2'

    data = random.Random('probability')

    nnet = nnet_for_testing(nnet_type, W_reg_type, b_reg_type)

    nnet = initializers.init_standard(nnet, data)
    optimizer = optimizers.SGD()
    trainer = training.CD(nnet)
    model = Model(nnet, optimizer, trainer)

    # test fit
    out = model.fit(data.train.data, n_epoch=n_epoch, batch_size=batch_size)
    assert isinstance(out, History)
Example #10
def test_init_standard(nnet_type):

    nnet = nnet_for_testing(nnet_type)
    dataset = random.Random('probability')

    b_ls = []
    for layer in nnet.layers:
        b_ls.append(B.eval(layer.b.shape))

    W_ls = []
    for synapse in nnet.synapses:
        W_ls.append(B.eval(synapse.W.shape))

    nnet = initializers.init_standard(nnet, dataset)

    # init_standard must not change any parameter shapes
    for size, layer in zip(b_ls, nnet.layers):
        assert_allclose(size, B.eval(layer.b.shape))
    for size, synapse in zip(W_ls, nnet.synapses):
        assert_allclose(size, B.eval(synapse.W.shape))
Example #11
def test_training_prob(nnet_type, training_type, pos_neg):
    nb_pos_steps = 2
    nb_neg_steps = 2
    batch_size = 6

    nnet = nnet_for_testing(nnet_type)
    train = _init_training(training_type, nnet, nb_pos_steps, nb_neg_steps,
                           batch_size)
    inputs, data = _init_data(batch_size)

    prob = train.pos_stats(inputs)
    if pos_neg == 'neg':
        prob = train.neg_stats(prob)

    fn = B.function([inputs], prob, updates=train.updates)

    output = fn(data)
    assert len(output) == len(nnet.layers)
    for out, size in zip(output, nnet.layer_size_list):
        assert out.shape == (batch_size, size)
Example #12
def test_ais_vs_exact():
    """Check that the exact RBM logZ falls inside the AIS error bounds."""
    n_runs = 5
    n_betas = 5
    
    nnet = nnet_for_testing('rbm')
    dataset = Random('probability')
    data = B.get_value(dataset.train.data)
    
    nnet = init_standard(nnet, dataset)

    ais = estimators.AIS(nnet, data, n_runs, n_betas=n_betas)

    ais.run_logZ()
    logZ_out, logZ_low_out, logZ_high_out = ais.estimate_log_error_Z()

    logZ = estimators.exact_logZ(nnet)

    assert logZ >= logZ_low_out
    assert logZ <= logZ_high_out
Example #13
def test_Sampler(nnet_type, constant, sampler_type):
    beta = 1.0
    nnet = nnet_for_testing(nnet_type)
    batch_size = 30

    constant_ls = []
    if constant is not None:
        constant_ls = [constant]

    if sampler_type == 'meanfield':
        sampler = samplers.Meanfield(nnet)
    elif sampler_type == 'gibbs':
        sampler = samplers.Gibbs(nnet)
    elif sampler_type == 'gibbs_prob':
        sampler = samplers.GibbsProb(nnet)
    else:
        raise NotImplementedError

    input_ls = [np.ones((batch_size, size)) for size in nnet.layer_size_list]
    input_ls_placeholder = [B.placeholder(in_np.shape) for in_np in input_ls]

    sampler.set_param(beta=beta, constant=constant_ls)

    prob_ls, updates = sampler.run_chain(input_ls_placeholder,
                                         beta=beta,
                                         constant=constant_ls)
    fn = B.function(input_ls_placeholder, prob_ls, updates=updates)
    prob_ls = fn(*input_ls)

    assert len(prob_ls) == len(input_ls)
    for i, p in enumerate(prob_ls):
        if i in constant_ls:
            # clamped layers must pass through unchanged
            assert_allclose(p, input_ls[i])
        else:
            m = np.ones((batch_size, nnet.layer_size_list[i]))
            if sampler_type == 'gibbs':
                # Gibbs draws binary samples: x + not(x) == 1 everywhere
                assert_allclose((p + np.logical_not(p)), m)
            else:
                # the untrained test network should give uniform 0.5 probabilities
                assert_allclose(p, 0.5 * m)
Example #14
def test_base_Sampler(nnet_type, constant, sampler_type):
    beta = 1.0
    nnet = nnet_for_testing(nnet_type)
    batch_size = 30

    constant_ls = []
    if constant is not None:
        constant_ls = [constant]

    sampler = samplers.Sampler(nnet)

    input_ls = [
        B.variable(np.ones((batch_size, size)))
        for size in nnet.layer_size_list
    ]

    sampler.set_param(beta=beta, constant=constant_ls)

    if sampler_type == 'probability':
        prob_ls = sampler.probability(*input_ls)
    elif sampler_type == 'sample':
        prob_ls = sampler.sample(*input_ls)
    elif sampler_type == 'sample_inputs':
        prob_ls = sampler.sample_inputs(*input_ls)
    else:
        raise NotImplementedError

    assert len(prob_ls) == len(input_ls)
    for i, p in enumerate(prob_ls):
        if i in constant_ls:
            assert p == input_ls[i]
        else:
            m = np.ones((batch_size, nnet.layer_size_list[i]))
            pp = B.eval(p)
            if sampler_type == 'sample':
                assert_allclose((pp + np.logical_not(pp)), m)
            else:
                assert_allclose(pp, 0.5 * m)
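Both sampler tests lean on the standard RBM/DBM conditional. Assuming nnet_for_testing builds a zero-initialized network (not stated in the source), the sigmoid argument is zero and every unit's probability is exactly 0.5, which is what the 0.5 * m assertions verify:

p(h_j = 1 \mid v) = \sigma\Big(c_j + \sum_i W_{ij} v_i\Big), \qquad \sigma(x) = \frac{1}{1 + e^{-x}}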
Example #15
def test_exact_error():
    # exact_logZ is only tractable for an RBM, so a DBM should raise
    nnet = nnet_for_testing('dbm')
    with pytest.raises(Exception):
        estimators.exact_logZ(nnet)
Example #16
def test_exact():
    nnet = nnet_for_testing('rbm')
    logZ = estimators.exact_logZ(nnet)
    assert logZ.size == 1
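exact_logZ succeeds here because an RBM's partition function reduces to a sum over a single layer. The standard identity (general RBM theory, not taken from this library) for visible bias b, hidden bias c and weights W is

Z = \sum_{h} \exp(c^\top h) \prod_i \Big(1 + \exp\big(b_i + \textstyle\sum_j W_{ij} h_j\big)\Big),

which is tractable whenever one layer is small enough to enumerate. A deeper DBM admits no such factorization, which is why Example #15 expects an exception.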
Example #17
def test_CSVLogger(sep):
    """
    This test is a slight modification of test_CSVLogger from
    https://github.com/fchollet/keras/blob/master/tests/keras/test_callbacks.py
    """
    
    nnet = nnet_for_testing('rbm')
    data = random.Random('probability')
    
    batch_size = 6
    n_epoch = 1

    if sep == '\t':
        filepath = 'log.tsv'
    elif sep == ',':
        filepath = 'log.csv'

    def make_model(dbm, data):
        optimizer = optimizers.SGD()
        trainer = training.CD(dbm)
        return Model(dbm, optimizer, trainer)

    # case 1, create new file with defined separator
    model = make_model(nnet, data)
    cbks = [callbacks.CSVLogger(filepath, separator=sep)]
    history = model.fit(data.train.data,
                        batch_size=batch_size,
                        n_epoch=n_epoch,
                        callbacks=cbks,
                        validation_data=data.valid.data)

    assert os.path.exists(filepath)
    with open(filepath) as csvfile:
        dialect = Sniffer().sniff(csvfile.read())
    assert dialect.delimiter == sep
    del model
    del cbks

    # case 2, append data to existing file, skip header
    model = make_model(nnet, data)
    cbks = [callbacks.CSVLogger(filepath, separator=sep, append=True)]
    history = model.fit(data.train.data,
                        batch_size=batch_size,
                        n_epoch=n_epoch,
                        callbacks=cbks,
                        validation_data=data.valid.data)

    # case 3, reuse of CSVLogger object
    history = model.fit(data.train.data,
                        batch_size=batch_size,
                        n_epoch=n_epoch,
                        callbacks=cbks,
                        validation_data=data.valid.data)

    import re
    with open(filepath) as csvfile:
        output = " ".join(csvfile.readlines())
        # the 'epoch' header must appear exactly once despite the append
        # and the reused logger
        assert len(re.findall('epoch', output)) == 1

    os.remove(filepath)
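If you need to inspect the log that CSVLogger writes, the standard csv module reads it back directly. A minimal sketch, assuming only the 'epoch' column that the test greps for; pass delimiter='\t' to DictReader for the tab-separated case:

import csv

# Hypothetical post-hoc inspection of the training log; every column
# except 'epoch' is an assumption.
with open('log.csv') as f:
    for row in csv.DictReader(f):
        print(row['epoch'], row)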
Example #18
def _dbm_prep(nnet_type):
    # helper: a test network plus a zero-valued visible input (minibatch of 6)
    mbs = 6
    nnet = nnet_for_testing(nnet_type)
    x = B.variable(np.zeros((mbs, nnet.layer_size_list[0])))
    
    return nnet, x