Esempio n. 1
0
def update_model(model):
    """Perform one online bandit update on *model* and return it.

    Draws a fresh (features, rewards) pair, picks one of the three
    actions uniformly at random, and feeds the flattened context,
    the action, and its scalar reward into ``model.update``.
    """
    features, rewards = get_cust_reward()
    # Explore: choose uniformly among the 3 available actions.
    chosen = np.random.randint(3)
    ctx = np.array(features).ravel()
    model.update(ctx, chosen, float(rewards[chosen]))
    return model
Esempio n. 2
0
def create_toy_contexual_dataset():
    """Build and return a ContextualDataset with 2000 toy samples.

    Each sample comes from ``get_cust_reward``; the action cycles
    deterministically through 0, 1, 2 so all arms are covered evenly.
    """
    ds = ContextualDataset(
        context_dim=2,
        num_actions=3,
        memory_size=-1,   # presumably -1 means "unbounded memory" — confirm in ContextualDataset
        intercept=False,
    )
    for step in range(2000):
        features, rewards = get_cust_reward()
        chosen = step % 3  # round-robin over the 3 actions
        ds.add(features, chosen, rewards[chosen])
    return ds
Esempio n. 3
0
def check_toy_problem():
    """Smoke-test the toy-problem helpers.

    Asserts the types/shapes produced by ``get_customer``,
    ``get_rewards`` and ``generate_dataframe``, then returns the
    (features, reward) pair obtained from ``get_cust_reward``.
    """
    customer = get_customer()
    customer_type, (age, feature) = customer
    # Check the customer tuple's component types one by one.
    for value, expected_type in ((customer_type, int), (age, int), (feature, float)):
        assert isinstance(value, expected_type)
    rewards = get_rewards(customer)
    assert rewards.shape == (3,)
    features, rewards = get_cust_reward()
    frame = generate_dataframe(10)
    assert isinstance(frame, pd.DataFrame)
    return features, rewards
Esempio n. 4
0
 def test_neural_linear_model(self):
     """End-to-end check of NeuralBandits.

     Exercises online updates, batch ``fit``, a save/load round-trip,
     and ``predict`` on a small sample, then removes the checkpoint.
     """
     model = NeuralBandits(num_actions=3,
                           num_features=2,
                           training_freq_network=200,
                           layer_sizes=[50])
     fts, reward = get_cust_reward()
     # Online phase: 300 bandit interactions on a single context.
     for _ in range(300):
         action = model.action(fts)
         r = reward[action]
         model.update(fts, action, r)
     # Batch phase: fit on a generated log of 500 rows.
     df = generate_dataframe(500)
     X = df[['age', 'ARPU']].values
     A = df['action'].values
     R = df['reward'].values
     model.fit(X, A, R)
     model.save('test_file')
     try:
         model = load_model('test_file')
         X = df[['age', 'ARPU']].sample(2).values
         model.predict(X, parallelize=False)
     finally:
         # Fix: previously the checkpoint leaked if load_model or
         # predict raised; always clean up 'test_file'.
         os.remove('test_file')