Ejemplo n.º 1
0
def test_compute_entropy_automatic_inference(basic_model):
    """compute_entropy works on pairs without an explicit infer_weights call."""
    basic_model.set_priors([Normal(), Normal()])
    basic_model.add_strict_preference('item 0', 'item 1')

    # One pair involves an item never compared, the other an already
    # compared pair; both entropy values should be truthy (non-zero).
    entropy_unseen_pair = basic_model.compute_entropy(['item 0', 'item 2'])
    entropy_seen_pair = basic_model.compute_entropy(['item 0', 'item 1'])

    assert entropy_seen_pair
    assert entropy_unseen_pair
Ejemplo n.º 2
0
def test_inference_with_indifferent_preferences(basic_model):
    """Indifference preferences keep the two inferred weights exactly tied."""
    basic_model.set_priors([Normal(0, 1), Normal(0, 1)])

    # Declare each remaining item indifferent to 'item 2' in turn and
    # re-infer; the symmetric evidence must keep the weights equal.
    for item in ('item 0', 'item 1'):
        basic_model.add_indifferent_preference(item, 'item 2')
        basic_model.infer_weights()
        assert basic_model.weights[0] == basic_model.weights[1]
Ejemplo n.º 3
0
def test_suggest_all_suggested_pairs(basic_model):
    """suggest() warns when every pair containing 'item 0' is exhausted."""
    basic_model.set_priors([Normal(), Normal()])
    for other in ('item 1', 'item 2'):
        basic_model.add_strict_preference('item 0', other)
    basic_model.infer_weights()

    # All pairs with 'item 0' are used up, so a warning is emitted and
    # the suggested pair avoids that item.
    with pytest.warns(UserWarning):
        suggestion = basic_model.suggest()
    assert 'item 0' not in suggestion
Ejemplo n.º 4
0
def test_suggest_raises_error(basic_model):
    """Both suggestion entry points reject an unknown method name."""
    basic_model.set_priors([Normal(), Normal()])
    basic_model.add_strict_preference('item 0', 'item 1')
    basic_model.add_strict_preference('item 0', 'item 2')

    for suggest_fn in (basic_model.suggest, basic_model.suggest_new_pair):
        with pytest.raises(ValueError):
            suggest_fn(method='test')
Ejemplo n.º 5
0
def test_suggest_new_pair_entropy_method(basic_model):
    """min_entropy suggestion proposes only the not-yet-compared items."""
    basic_model.set_priors([Normal(), Normal()])
    basic_model.add_strict_preference('item 0', 'item 2')
    basic_model.add_strict_preference('item 1', 'item 2')
    basic_model.infer_weights()

    # 'item 0' vs 'item 1' is the only uncompared pair left.
    suggestion = basic_model.suggest_new_pair(method='min_entropy')
    assert set(suggestion) <= {'item 0', 'item 1'}
Ejemplo n.º 6
0
def test_inference_with_strict_and_indifferent_preferences_with_mean_method(
        basic_model):
    """Mean-based inference preserves a transitively consistent ordering.

    NOTE(review): despite the test name, only strict preferences are added
    here -- confirm whether an indifferent preference was intended.
    """
    basic_model.set_priors([Normal(0, 1), Normal(0, 1)])
    # item 0 > item 1 > item 2.
    for winner, loser in (('item 0', 'item 2'),
                          ('item 1', 'item 2'),
                          ('item 0', 'item 1')):
        basic_model.add_strict_preference(winner, loser)

    basic_model.infer_weights(method='mean')
    assert basic_model.weights[0] > basic_model.weights[1]
Ejemplo n.º 7
0
def test_rank_automatic_inference(basic_model):
    """rank() infers weights on demand and orders items by preference."""
    basic_model.set_priors([Normal(), Normal()])
    # item 2 > item 1 > item 0.
    for preferred, other in (('item 2', 'item 0'),
                             ('item 2', 'item 1'),
                             ('item 1', 'item 0')):
        basic_model.add_strict_preference(preferred, other)

    ranking = basic_model.rank()
    assert list(ranking.index[:3]) == ['item 2', 'item 1', 'item 0']
Ejemplo n.º 8
0
def test_inference_with_strict_preferences(basic_model):
    """Weights are None until inferred and track added strict preferences."""
    basic_model.set_priors([Normal(0, 1), Normal(0, 1)])

    basic_model.add_strict_preference('item 0', 'item 2')
    assert basic_model.weights is None  # nothing inferred yet
    basic_model.infer_weights()
    assert basic_model.weights is not None
    assert basic_model.weights[0] > basic_model.weights[1]

    # Further consistent preferences, re-inferring after each, keep the
    # same weight ordering.
    for winner, loser in (('item 1', 'item 2'), ('item 0', 'item 1')):
        basic_model.add_strict_preference(winner, loser)
        basic_model.infer_weights()
    assert basic_model.weights[0] > basic_model.weights[1]
Ejemplo n.º 9
0
def test_inference_with_different_priors(basic_model):
    """Mixed Normal/Exponential priors yield weights near the prior modes.

    The Exponential prior with a negative parameter triggers a UserWarning
    during inference.
    """
    basic_model.set_priors([Normal(1, 1), Exponential(-0.5)])
    assert basic_model.weights is None
    with pytest.warns(UserWarning):
        basic_model.infer_weights()
    # Fix: the original `a - b < 1e-4` passed trivially whenever a < b,
    # however far the weight was below the expected value; abs() makes
    # this a genuine closeness check.
    assert all(abs(a - b) < 1e-4
               for a, b in zip(basic_model.weights.tolist(), [1, 0]))
Ejemplo n.º 10
0
def test_default_normal_prior():
    """Default Normal prior is standard normal and peaks at zero."""
    prior = Normal()

    assert (prior.mu, prior.sigma) == (0, 1)

    # Density is maximal at the mean for both scalar and array inputs.
    for offset in (-1, 1):
        assert prior(0) > prior(offset)
    assert prior(np.array([0])) > prior(np.array([-1]))
Ejemplo n.º 11
0
def test_strict_log_probability(basic_model):
    """Strict-preference log-probabilities match precomputed references."""
    basic_model.set_priors([Normal(1, 0.5), Exponential(0.5)])
    weights = np.array([1.0, 0.5])

    # Reference values for the pair in both orders.
    expected = {
        ('item 0', 'item 1'): -0.1413058,
        ('item 1', 'item 0'): -2.026650,
    }
    for pair, reference in expected.items():
        assert basic_model.strict_log_probability(
            pair, weights) == pytest.approx(reference, 0.001)
Ejemplo n.º 12
0
def display_page(pathname, data, preference_data):  # noqa
    """Route a URL path to the corresponding page layout.

    Parameters
    ----------
    pathname : str
        Requested path: '/', '/available', '/selection' or '/recommendation'.
    data : dict
        App state; ``data['available']`` holds the punk_df row labels of
        beers the user marked as available (read on the recommendation page).
    preference_data : dict
        Elicitation state; ``preference_data['preferences']`` holds
        (preferred, other) index pairs into punk_df.

    Returns
    -------
    The layout object for the requested page.  NOTE(review): any other
    path falls through all branches and returns None -- confirm the router
    never passes unknown paths.
    """
    if pathname == '/':
        return splash_layout
    elif pathname == '/available':
        return available_layout
    elif pathname == '/selection':
        return get_selection_layout(data)
    elif pathname == '/recommendation':
        # Min-max normalise the available beers' features; the model is
        # then built with normalise=False since scaling is done here.
        available_beers_df = punk_df.loc[data['available'], ['name'] + features]
        available_beers_df.set_index('name', inplace=True)
        normalised_available_beers_df = (available_beers_df - available_beers_df.min()) / (
                available_beers_df.max() - available_beers_df.min())
        model = BayesPreference(data=normalised_available_beers_df, normalise=False)
        model.set_priors([Normal() for _ in features])
        # Replay the user's recorded pairwise preferences, mapping row
        # indices back to beer names.
        for preference in preference_data['preferences']:
            model.add_strict_preference(punk_df.loc[preference[0], 'name'], punk_df.loc[preference[1], 'name'])
        model.infer_weights()
        # table for tasted beers
        tasted_table = model.rank()
        tasted_table['beer'] = tasted_table.index.values

        # table for all beers
        all_beers_df = punk_df.loc[:, ['name'] + features]
        all_beers_df.set_index('name', inplace=True)
        # Scale ALL beers with the available-beers min/max so utilities are
        # on the same scale the model's weights were inferred on.
        normalised_all_beers_df = (all_beers_df - available_beers_df.min()) / (
                    available_beers_df.max() - available_beers_df.min())
        # Utility of each beer = inferred weights . normalised features.
        utilities = [model.weights.dot(row.values) for i, row in normalised_all_beers_df.iterrows()]
        rank_df = pd.DataFrame(utilities, index=normalised_all_beers_df.index.values, columns=['utility'])
        table = rank_df.sort_values(by='utility', ascending=False)
        table['beer'] = table.index.values
        # Re-attach raw (un-normalised) feature columns for display; abv is
        # converted from a percentage to a fraction.
        table['abv'] = all_beers_df.loc[table.index, 'abv'].div(100).values
        table['ibu'] = all_beers_df.loc[table.index, 'ibu'].values
        table['ph'] = all_beers_df.loc[table.index, 'ph'].values
        table['hops'] = all_beers_df.loc[table.index, 'n_hops'].values
        table['color'] = all_beers_df.loc[table.index, 'ebc'].values
        # A beer counts as tasted if it appears in the ranked (compared) set.
        table['tasted'] = ["yes" if (beer in tasted_table.index) else "no" for beer in table.index]
        cols = ['beer', 'utility', 'tasted', 'abv', 'ibu', 'ph', 'hops', 'color']
        table = table[cols]
        # One-row table of the inferred weight per feature column.
        weights_table = pd.DataFrame({col: [weight] for col, weight in zip(model.data.columns, model.weights)})
        return get_recommendation_layout(tasted_table, table, weights_table)
Ejemplo n.º 13
0
def test_indifferent_log_probability(basic_model):
    """Indifferent-preference log-probability matches the reference value."""
    basic_model.set_priors([Normal(1, 0.5), Exponential(0.5)])
    weights = np.array([1.0, 0.5])
    result = basic_model.indifferent_log_probability(('item 0', 'item 1'),
                                                     weights)
    assert result == pytest.approx(-0.7188213, 0.001)
Ejemplo n.º 14
0
def test_probability(basic_model):
    """probability(x) equals exp(log_probability(x)).

    Fix: the original used exact ``==`` on floats; even if the library
    currently computes probability as exp(log_probability), exact equality
    breaks under any internal refactoring.  pytest.approx keeps the check
    meaningful and robust.
    """
    basic_model.set_priors([Normal(1, 0.5), Exponential(0.5)])
    x = np.array([1.0, 0.5])
    assert basic_model.probability(x) == pytest.approx(
        np.exp(basic_model.log_probability(x)))
Ejemplo n.º 15
0
def test_normal_prior_with_negative_weights():
    """A Normal prior with a negative mean stores and peaks at that mean."""
    prior = Normal(mu=-2, sigma=5)

    assert (prior.mu, prior.sigma) == (-2, 5)
    # Density at the mean dominates points one sigma away on either side.
    for other in (3, -7):
        assert prior(-2) > prior(other)
Ejemplo n.º 16
0
def test_normal_prior():
    """A Normal prior with explicit parameters stores and peaks at its mean."""
    prior = Normal(mu=2, sigma=5)

    assert (prior.mu, prior.sigma) == (2, 5)
    # Density at the mean dominates points one sigma away on either side.
    for other in (-3, 7):
        assert prior(2) > prior(other)
Ejemplo n.º 17
0
"""Example: elicit car preferences on the mtcars dataset with pypbl.

Reads data/mtcars.csv, declares one prior per feature column, records a
few pairwise preferences and infers per-feature weights.
"""
import pandas as pd

from pypbl.priors import Normal, Exponential
from pypbl.elicitation import BayesPreference

data = pd.read_csv('data/mtcars.csv')
print(data)

# set index of the data frame to be the item names
data.set_index('model', inplace=True)

p = BayesPreference(data=data)
# One prior per column, in column order; Exponential priors encode a
# directional expectation for the weight's sign, Normal() does not.
p.set_priors([
    Exponential(1),  # MPG - high miles per gallon is preferred
    Normal(),  # number of cylinders
    Normal(),  # displacement
    Exponential(2),  # horsepower - high horsepower is preferred
    Normal(),  # real axle ratio
    Normal(),  # weight
    Exponential(-3),  # quarter mile time - high acceleration is preferred
    Normal(),  # engine type
    Normal(),  # transmission type
    Normal(),  # number of gears
    Normal()  # number of carburetors
])

# add some preferences and infer the weights for each parameter
p.add_strict_preference('Pontiac Firebird', 'Fiat 128')
p.add_strict_preference('Mazda RX4', 'Mazda RX4 Wag')
p.add_indifferent_preference('Merc 280', 'Merc 280C')
p.infer_weights(method='mean')
Ejemplo n.º 18
0
def test_incorrect_set_priors(basic_model):
    """set_priors rejects a priors list that does not fit the model."""
    assert basic_model.priors is None
    # A single prior is invalid here (other tests supply two) and raises.
    with pytest.raises(AttributeError):
        basic_model.set_priors([Normal()])
Ejemplo n.º 19
0
def test_inference_with_normal_priors(basic_model):
    """With no preferences, inferred weights match the Normal prior means."""
    basic_model.set_priors([Normal(1, 0.5), Normal(2, 0.5)])
    assert basic_model.weights is None
    basic_model.infer_weights()
    # Fix: the original `a - b < 1e-4` passed trivially whenever a < b,
    # however far the weight was below the expected value; abs() makes
    # this a genuine closeness check.
    assert all(abs(a - b) < 1e-4
               for a, b in zip(basic_model.weights.tolist(), [1, 2]))
Ejemplo n.º 20
0
def test_inference_with_normal_priors_parsing_mean_method(basic_model):
    """Mean-method inference recovers the prior means within a loose bound."""
    basic_model.set_priors([Normal(1, 0.5), Normal(2, 0.5)])
    assert basic_model.weights is None
    basic_model.infer_weights(method='mean', iterations=500)
    # Fix: the original `a - b < 0.5` passed trivially whenever a < b,
    # however far the weight was below the expected value; abs() makes
    # this a genuine closeness check.
    assert all(abs(a - b) < 0.5
               for a, b in zip(basic_model.weights.tolist(), [1, 2]))
Ejemplo n.º 21
0
def test_suggest_variance_method(basic_model):
    """max_variance suggestion may re-use an already-compared item."""
    basic_model.set_priors([Normal(), Normal()])
    basic_model.add_strict_preference('item 0', 'item 1')
    basic_model.infer_weights()

    suggestion = basic_model.suggest(method='max_variance')
    assert 'item 0' in suggestion
Ejemplo n.º 22
0
def test_inference_raises_error(basic_model):
    """infer_weights rejects an unknown method name with ValueError."""
    basic_model.set_priors([Normal(), Normal()])
    basic_model.add_strict_preference('item 0', 'item 1')

    with pytest.raises(ValueError):
        basic_model.infer_weights(method='test')
Ejemplo n.º 23
0
# NOTE(review): this fragment uses names defined outside the visible chunk
# (feature_names, item_names, n_features) and the loop body appears to
# continue beyond it -- confirm against the full file.

# Synthetic data set: 100 items with 3 random features each.
features = np.random.rand(100, 3)
data = pd.DataFrame(features, columns=feature_names, index=item_names)

# Ground-truth weights used to simulate the user's responses below.
known_weights = (5, -1, 2)

print(data)

n_preferences = 30  # preference queries per run
n_repeats = 30      # independent runs per method

random_method = []
for test in range(n_repeats):
    # RANDOM METHOD
    print('RANDOM METHOD: {} of {}'.format(test, n_repeats))
    random_model = BayesPreference(data=data)
    random_model.set_priors([Normal() for i in range(n_features)])
    random_method_weight_error = []
    for i in range(n_preferences):
        # Seed the elicitation with a fixed first pair, then let the model
        # suggest random pairs.
        if i == 0:
            suggested_pair = ['item 0', 'item 1']
        else:
            suggested_pair = random_model.suggest_new_pair(method='random')

        # True utilities under known_weights -- presumably used just after
        # this chunk to decide the simulated preference (continuation not
        # shown here).
        a_utility = sum([
            x * w for x, w in zip(data.loc[suggested_pair[0], :].values,
                                  known_weights)
        ])
        b_utility = sum([
            x * w for x, w in zip(data.loc[suggested_pair[1], :].values,
                                  known_weights)
        ])
Ejemplo n.º 24
0
"""Example: elicit beer preferences from the BrewDog dataset with pypbl."""
from pypbl.priors import Normal
from pypbl.elicitation import BayesPreference

# NOTE(review): relies on pandas being imported as `pd` earlier in the
# file -- confirm against the full source.
data = pd.read_csv('data/bdbeers.csv')
print(data)

# set index of the data frame to be the item names
data.set_index('name', inplace=True)

# drop some columns that are unlikely to influence preferences
data.drop(columns=['srm', 'target_fg'], inplace=True)

p = BayesPreference(data=data)
# One uninformative Normal prior per remaining column, in column order.
p.set_priors([
    Normal(),  # abv - alcohol strength
    Normal(),  # attenuation_level
    Normal(),  # ebc - beer colour
    Normal(),  # ibu - bitterness
    Normal(),  # n_hops - number of hops
    Normal(),  # n_malt - number of malts
    Normal(),  # ph - ph level
    Normal(),  # target_og - target gravity
])

# add some preferences and infer the weights for each parameter
p.add_strict_preference('Indie Pale Ale', 'Kingpin')
p.add_strict_preference('Dead Pony Club', 'Indie Pale Ale')
p.add_strict_preference('Dead Pony Club', 'Punk IPA 2010 - Current')
p.add_strict_preference('5am Saint', 'Dead Pony Club')
p.add_strict_preference('Hazy Jane', '5am Saint')
Ejemplo n.º 25
0
def test_set_priors(basic_model):
    """set_priors stores the supplied prior objects on the model."""
    assert basic_model.priors is None
    basic_model.set_priors([Normal(), Normal()])
    assert all(isinstance(prior, Normal) for prior in basic_model.priors)