# Synthetic-data sanity check: build a tiny hand-specified BKT model,
# generate data from it, and run one-step-ahead prediction on that data.
num_resources = 2
num_fit_initializations = 10
# 500 sequences (students), 100 observations each.
# np.int was removed in NumPy 1.24 -- use the builtin int instead.
observation_sequence_lengths = np.full(500, 100, dtype=int)

# Generate synthetic model and data. The model is deliberately easy.
truemodel = {}

# Per-resource 2x2 transition matrices. Shape is (num_resources, 2, 2) so the
# per-resource write As[i, :, :] below and the slices As[:, 1, 0] / As[:, 0, 1]
# all treat axis 0 as the resource axis. (The original declared
# (2, 2, num_resources), which only worked because num_resources == 2 --
# assumed downstream code also reads axis 0 as resource; TODO confirm.)
# np.float_ was removed in NumPy 2.0 -- use np.float64.
truemodel["As"] = np.zeros((num_resources, 2, 2), dtype=np.float64)
for i in range(num_resources):
    truemodel["As"][i, :, :] = np.transpose([[0.7, 0.3], [0.01, 0.99]])
truemodel["learns"] = truemodel["As"][:, 1, 0]   # P(unlearned -> learned) = 0.3
truemodel["forgets"] = truemodel["As"][:, 0, 1]  # P(learned -> unlearned) = 0.01

# Initial state distribution: P(unlearned) = 0.9, P(learned) = 0.1.
truemodel["pi_0"] = np.array([[0.9], [0.1]])
truemodel["prior"] = truemodel["pi_0"][1][0]

# Per-subpart emission noise. num_subparts is presumably defined earlier in
# the file -- TODO confirm.
truemodel["guesses"] = np.full(num_subparts, 0.1, dtype=np.float64)
truemodel["slips"] = np.full(num_subparts, 0.03, dtype=np.float64)

# Data!
print("generating data...")
data = synthetic_data.synthetic_data(truemodel, observation_sequence_lengths)

(correct_predictions, state_predictions) = predict_onestep.run(truemodel, data)

print(correct_predictions)
print(state_predictions)

print("finishing...")
# ---- Пример #2 (Example #2): scrape-artifact separator between snippets ----
# (stray "0" from the original scrape output, kept as a comment)
print ('F1 Score Hapcap Ends')
print (f1_bkt_ends)

# Now let's see if we can do some testing with random data generation,
# using the synthetic data library.

# Per-student observation counts for the held-out (test) students.
# S_lengths / hapcap_train_students / hapcap_test_students are presumably
# defined earlier in the file -- TODO confirm.
# Sequence lengths are counts, so build an integer array from the slice
# directly instead of filling a float np.zeros() array index by index.
test_range = slice(hapcap_train_students,
                   hapcap_train_students + hapcap_test_students)
observation_sequence_lengths = np.asarray(S_lengths[test_range], dtype=np.int64)

# Total number of observations across all test students (scalar).
total_length = int(observation_sequence_lengths.sum())
# One resource id per observation (all ones: single resource).
resources_data = np.ones((total_length))
hapcap_model['resources'] = copy.deepcopy(resources_data)

data_fake = synthetic_data.synthetic_data(hapcap_model, observation_sequence_lengths, resources_data)

print ('Test Synthetic Data')
print (data_fake['data'].shape)

# Mean over axis 0, thresholded at 0.5. The "- 1" suggests the simulated data
# is 1/2-coded and is being mapped onto {0, 1} before thresholding -- TODO
# confirm against synthetic_data's output encoding.
data_fake_mean = ((np.mean(data_fake['data'], axis=0) - 1) > .5).astype(int)
# Per-column count of disagreements with the squashed ground-truth test matrix.
data_mismatched = np.sum(np.abs(S_test_hapcap_squashed - data_fake_mean), axis=0)

# Per-column fraction of rows where the thresholded synthetic mean matches.
data_accuracy = (S_test_hapcap_squashed.shape[0] - data_mismatched) / S_test_hapcap_squashed.shape[0]

print ('Data Mean Accuracy')
print (data_accuracy)

# Get average: accumulator for per-sequence accuracies (filled later).
data_fake_accuracies = np.zeros((data_fake['data'].shape[0]))