def fit(self, X, y):
    """
    Fit the classifier. Binary and multi-class targets are supported; with
    more than two classes, a one-vs-rest model is trained for each class.

    Parameters
    ----------
    X : ndarray, shape (m, n)
        Training samples.
    y : ndarray, shape (m, 1)
        Integer class labels.

    Returns
    -------
    self.paras : dict
        Dictionary of trained parameters w and b. If c > 2, the dictionary
        stores the parameters of each per-class model, keyed by class index.
    """
    start = time.time()
    X, y, c = check_data(X, y)

    # c is the number of classes
    if c == 2:
        self.paras = self._train(X, y)
    elif c > 2:
        # one-vs-rest: relabel class i as 1 and every other class as 0,
        # then train a separate binary model for each i
        for i in range(c):
            y_copy = (y == i).astype(y.dtype)
            self.paras[i] = self._train(X, y_copy)

    stop = time.time()
    print("Time taken: {0:.3f} seconds".format(stop - start))
    return self.paras
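
The multi-class branch above is plain one-vs-rest: for each class i it builds a binary copy of y (1 where the label equals i, 0 elsewhere) and hands it to the binary trainer. A self-contained sketch of just that relabeling step, vectorized with NumPy:

import numpy as np

y = np.array([[0], [2], [1], [2], [0]])  # integer labels, shape (m, 1)
c = len(np.unique(y))                    # number of classes

for i in range(c):
    # binary target for the i-th one-vs-rest problem
    y_binary = (y == i).astype(int)
    print(i, y_binary.ravel())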
Example #2

import numpy as np
from pyBKT.generate import synthetic_data, random_model_uni
from pyBKT.fit import EM_fit
from utils import crossvalidate, accuracy, rmse, auc, check_data, data_helper, ktidem_skills
import copy
np.seterr(divide='ignore', invalid='ignore')
num_fit_initializations = 20
seed, folds = 2020, 5  # can be customized; keep the same seed and fold count across all trials
results = {}  # per-skill counts, percent correct, and cross-validated AUCs

df, skill_list, student_count, data_count, template_count = ktidem_skills.find_skills()
for i in range(10):
    skill_name = skill_list[i]
    results[skill_name] = [student_count[i], data_count[i], template_count[i]]

    data = data_helper.convert_data(df, skill_name)
    check_data.check_data(data)
    # responses are coded 1 (incorrect) / 2 (correct), so sum - count = number correct
    results[skill_name].append((np.sum(data["data"][0]) - len(data["data"][0])) / len(data["data"][0]))
    print("creating simple model")
    results[skill_name].append(crossvalidate.crossvalidate(data, folds=folds, seed=seed)[2])  # index 2 = AUC

    data_multiguess = data_helper.convert_data(df, skill_name, multiguess=True)
    check_data.check_data(data_multiguess)
    print("creating kt_idem model")
    results[skill_name].append(crossvalidate.crossvalidate(data_multiguess, folds=folds, seed=seed)[2])
    #print(results)

print("Model\tNum Students\tNum Data\tNum Templates\tCorrect Percent\tSimple AUC\tKT_IDEM AUC")
for k, v in results.items():
    print("%s\t%d\t%d\t%d\t%.5f\t%.5f\t%.5f" % (k, v[0], v[1], v[2], v[3], v[4], v[5]))
Example #3
import sys
sys.path.append('../')
import os
import numpy as np
from pyBKT.generate import random_model_uni
from pyBKT.fit import EM_fit, predict_onestep
from utils import check_data, ktpps_data_helper

# ASSUMPTION: the lines defining all_files were lost from this excerpt;
# a listing of the data directory is the most likely original.
all_files = os.listdir(".")

total_responses = 0

kt_better = 0
pps_better = 0

for i in all_files:
    if i == "README.txt" or i == ".DS_Store":
        continue

    print("Creating model for ", i)

    data, pps_data = ktpps_data_helper.convert_data(i)

    total_responses += len(data["starts"])

    check_data.check_data(data)
    check_data.check_data(pps_data)

    # first, generate the basic model and run accuracy tests using MAE as the evaluator
    num_fit_initializations = 20
    best_likelihood = float("-inf")
    for j in range(num_fit_initializations):  # use j here: i already holds the current file name
        fitmodel = random_model_uni.random_model_uni(1, 1)
        (fitmodel, log_likelihoods) = EM_fit.EM_fit(fitmodel, data)
        if log_likelihoods[-1] > best_likelihood:
            best_likelihood = log_likelihoods[-1]
            best_model = fitmodel

    data["lengths"] = data["lengths_full"]

    (correct_predictions,
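
The header comment names MAE as the evaluator, but the evaluation code is cut off. A minimal sketch of that metric, assuming the 1/2-coded observations are shifted to 0/1 before being compared with the predicted probabilities of a correct response:

import numpy as np

def mean_absolute_error(true_values, predictions):
    """MAE between observed correctness (0/1) and predicted P(correct)."""
    true_values = np.asarray(true_values, dtype=float)
    predictions = np.asarray(predictions, dtype=float)
    return np.mean(np.abs(true_values - predictions))

print(mean_absolute_error([1, 0, 1], [0.8, 0.3, 0.6]))  # ~0.3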
Example #4
import sys
sys.path.append('../')
import numpy as np
from pyBKT.generate import synthetic_data, random_model_uni
from pyBKT.fit import EM_fit
from utils import crossvalidate, accuracy, rmse, auc, check_data, data_helper
import copy
np.seterr(divide='ignore', invalid='ignore')
num_fit_initializations = 20
skill_name = "Box and Whisker"
seed, folds = 2020, 5  # can be customized; keep the same seed and fold count across all trials
results = {}  # stores (accuracy, RMSE, AUC) per model

# load the data
print("starting simple model data collection")
data, df = data_helper.convert_data("as.csv", skill_name, return_df=True)  # keep the dataframe for further trials
check_data.check_data(data)
print("creating simple model")
results["Simple Model"] = crossvalidate.crossvalidate(data, folds=folds, seed=seed)

print("starting majority class calculation")
majority = 0
if np.sum(data["data"][0]) - len(data["data"][0]) > len(data["data"][0]) - (np.sum(data["data"][0]) - len(data["data"][0])):
    majority = 1
pred_values = np.zeros((len(data["data"][0]),))
pred_values.fill(majority)
true_values = data["data"][0].tolist()
pred_values = pred_values.tolist()
results["Majority Class"] = (accuracy.compute_acc(true_values,pred_values), rmse.compute_rmse(true_values,pred_values), auc.compute_auc(true_values, pred_values))


print("starting item_learning_effect data collection")
Example #5

import sys
sys.path.append('../')
import numpy as np
from pyBKT.generate import synthetic_data, random_model_uni
from pyBKT.fit import EM_fit
from utils import data_helper, check_data
np.seterr(divide='ignore', invalid='ignore')

skill_name = "Table"

data = data_helper.convert_data("as.csv", skill_name, multilearn=True)
check_data.check_data(data)
num_gs = len(data["gs_names"])
num_learns = len(data["resource_names"])

num_fit_initializations = 5
best_likelihood = float("-inf")

for i in range(num_fit_initializations):
    fitmodel = random_model_uni.random_model_uni(num_learns, num_gs)  # randomly initialize parameter values
    (fitmodel, log_likelihoods) = EM_fit.EM_fit(fitmodel, data)
    print(log_likelihoods[-1])
    if log_likelihoods[-1] > best_likelihood:
        best_likelihood = log_likelihoods[-1]
        best_model = fitmodel

# report the learned parameters
print('')
print('Trained model for %s skill given %d learning rates, %d guess/slip rates' % (skill_name, num_learns, num_gs))
print('\t\tlearned')
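
The listing is truncated at the header of the parameter table. In the older pyBKT model dictionary the learned quantities live under keys such as "learns", "guesses", and "slips"; a hedged sketch of the kind of printout that likely followed (the key names and layouts are assumptions if your version differs):

# ASSUMPTION: key names follow the old pyBKT model dict; adjust to your version.
for r, name in enumerate(data["resource_names"]):
    print('learn_%s\t%.4f' % (name, best_model["learns"][r]))
for g, name in enumerate(data["gs_names"]):
    print('guess_%s\t%.4f' % (name, best_model["guesses"][g]))
    print('slip_%s\t%.4f' % (name, best_model["slips"][g]))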
Example #6
import sys
sys.path.append('../')
import numpy as np
from utils import crossvalidate, nips_data_helper, check_data, auc
from copy import deepcopy

np.seterr(divide='ignore', invalid='ignore')

num_fit_initializations = 20
skill_count = 124

# load the data
Data = nips_data_helper.convert_data("builder_train.csv")
test_data = nips_data_helper.convert_data("builder_test.csv")

print("Data preprocessing finished")

for i in range(skill_count):
    check_data.check_data(Data[i])
    check_data.check_data(test_data[i])

print("All data okay")

total_auc = 0
total_trials = 0
all_true = []
all_pred = []
for skill in range(skill_count):
    num_fit_initializations = 5
    best_likelihood = float("-inf")
    if len(Data[skill]["resources"]) < 1:
        print("No data for skill %s" % skill)
        continue
    else:
Example #7

import sys
sys.path.append('../')
import numpy as np
from pyBKT.fit import EM_fit, predict_onestep
from utils import crossvalidate, nips_data_helper, check_data, auc
from copy import deepcopy
np.seterr(divide='ignore', invalid='ignore')

num_fit_initializations = 20
skill_count = 124  # hard-coded for the NIPS dataset

# load the data
Data = nips_data_helper.convert_data("builder_train.csv", url2="builder_test.csv")

print("Data preprocessing finished")

for i in range(skill_count):
    check_data.check_data(Data[i])

print("All data okay")

all_true = []
all_pred = []
for skill in range(skill_count):

    if len(Data[skill]["resources"]) < 5:  # AUC needs both response classes present, so skip sparse skills
        print("Not enough data for skill %s" % skill)
        continue

    temp = crossvalidate.crossvalidate(Data[skill], verbose=False, return_arrays=True)
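
The listing ends here, mid-loop. Assuming return_arrays=True makes crossvalidate return the per-fold true and predicted arrays (an assumption -- check the utils source for the exact return order), the natural continuation pools them and scores one overall AUC with the repo's own helper:

    # ASSUMPTION: the element order of `temp` is a guess; verify against crossvalidate's source.
    true_values, pred_values = temp[0], temp[1]
    all_true.extend(true_values)
    all_pred.extend(pred_values)

print("Overall AUC: %.5f" % auc.compute_auc(all_true, all_pred))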