Example #1
import numpy
import pytest

# `classifier` and `LineSearch` are assumed to be provided by the package under
# test (a matrix factorisation model and its line-search wrapper).

def test_all_values():
    I, J = 10, 9
    values_K = [1, 2, 4, 5]
    R = 2 * numpy.ones((I, J))
    M = numpy.ones((I, J))
    priors = {'alpha': 3, 'beta': 4, 'lambdaU': 5, 'lambdaV': 6}
    initUV = 'exp'
    iterations = 11

    linesearch = LineSearch(classifier, values_K, R, M, priors, initUV,
                            iterations)
    linesearch.all_performances = {
        'BIC': [10, 9, 8, 7],
        'AIC': [11, 13, 12, 14],
        'loglikelihood': [16, 15, 18, 17]
    }
    assert numpy.array_equal(linesearch.all_values('BIC'), [10, 9, 8, 7])
    assert numpy.array_equal(linesearch.all_values('AIC'), [11, 13, 12, 14])
    assert numpy.array_equal(linesearch.all_values('loglikelihood'),
                             [16, 15, 18, 17])
    with pytest.raises(AssertionError) as error:
        linesearch.all_values('FAIL')
    assert str(error.value) == "Unrecognised metric name: FAIL."
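
Given the assertions in this test, the method under test appears to be a thin lookup over the stored performance dictionary. The sketch below is a hypothetical reconstruction based only on the expected behaviour above, not the library's actual source:

# Hypothetical sketch of LineSearch.all_values(), inferred from the test:
# return the recorded performances for a metric, and fail with a clear
# message on an unrecognised metric name.
def all_values(self, metric):
    assert metric in self.all_performances, \
        "Unrecognised metric name: %s." % metric
    return self.all_performances[metric]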

# --- Second snippet: run a full line search on synthetic data. ---
# numpy, matplotlib and the model settings (I, J, true_K, lambdaU, lambdaV,
# tau, alpha, beta, values_K, initUV, etc.) are defined earlier in the
# original script.
(_, _, _, _, R) = generate_dataset(I, J, true_K, lambdaU, lambdaV, tau)
M = numpy.ones((I, J))
#M = try_generate_M(I, J, fraction_unknown, attempts_M)

# Run the line search. The priors lambdaU and lambdaV each need to be a single value, since K is not known yet.
priors = {
    'alpha': alpha,
    'beta': beta,
    'lambdaU': lambdaU[0, 0] / 10,
    'lambdaV': lambdaV[0, 0] / 10
}
line_search = LineSearch(classifier, values_K, R, M, priors, initUV,
                         iterations, restarts)
line_search.search(burn_in, thinning)

# Plot the performance of each metric in its own figure (MSE is on a different scale from the other three).
metrics = ['loglikelihood', 'BIC', 'AIC', 'MSE']
for metric in metrics:
    plt.figure()
    plt.plot(values_K, line_search.all_values(metric), label=metric)
    plt.legend(loc=3)

# Also print out all values in a dictionary
all_values = {}
for metric in metrics:
    all_values[metric] = line_search.all_values(metric)

print "all_values = %s" % all_values