# --- Per-model data normalisation and kernel setup -------------------------
# NOTE(review): this chunk was flattened onto one physical line in the file,
# which turned everything after the first '#' into a single dead comment and
# silently disabled most of the statements below. Reconstructed here.
# Presumably this body sits inside a per-model loop (it references `cur` and
# the index `i`) -- TODO confirm against the surrounding file.

# Finish normalising the training outputs (the mean was subtracted upstream).
Yn_cur /= Ystd_cur
# Normalise test data similarly to training data.
Ytestn_cur = Ytest_cur - Ymean_cur
Ytestn_cur /= Ystd_cur
#cur.Ymean = Ymean_cur
#cur.Ystd = Ystd_cur
# As above but for the labels (kept disabled, as in the original):
#Lmean_cur = L_cur.mean()
#Ln_cur = L_cur - Lmean_cur
#Lstd_cur = Ln_cur.std()
#Ln_cur /= Lstd_cur
#Ltestn_cur = Ltest_cur - Lmean_cur
#Ltestn_cur /= Lstd_cur

# Reset the model's input slot and attach the normalised data sets.
cur.X = None
# (dropped the original's redundant `cur.Y = None`, which was immediately
# overwritten by the assignment below)
cur.Y = {'Y': Yn_cur}
cur.Ytestn = {'Ytest': Ytestn_cur}
cur.Ltest = {'Ltest': Ltest_cur}

# Blank separator line (was a bare Python-2 `print`; this form prints the
# same newline and also works under Python 3).
print("")
fname = modelList[i]

# Use a composite RBF + bias + white-noise kernel only for high-dimensional
# data; otherwise pass None and let the library choose its default.
if Q > 100:
    # One could parse and execute the string kernelStr for the kernel
    # instead of the line below.
    kernel = GPy.kern.RBF(Q, ARD=False) + GPy.kern.Bias(Q) + GPy.kern.White(Q)
else:
    kernel = None
# Simulate the function of storing a collection of events.
# --- Train the current model and evaluate cross-model familiarity ----------
# NOTE(review): this chunk was flattened onto one physical line, turning
# everything after the first '#' into a dead comment. Reconstructed here.
# The statements up to `mm.append(cur)` presumably run inside a
# per-participant loop indexed by `i`, with the familiarity sweep following
# that loop -- TODO confirm against the surrounding file.

# Finish normalising the training outputs (the mean was subtracted upstream).
Yn_cur /= Ystd_cur
# Normalise test data similarly to training data.
Ytestn_cur = Ytest_cur - Ymean_cur
Ytestn_cur /= Ystd_cur
# Remember the normalisation constants so predictions can be de-normalised.
cur.Ymean = Ymean_cur
cur.Ystd = Ystd_cur
# As above but for the labels (kept disabled, as in the original):
#Lmean_cur = L_cur.mean()
#Ln_cur = L_cur - Lmean_cur
#Lstd_cur = Ln_cur.std()
#Ln_cur /= Lstd_cur
#Ltestn_cur = Ltest_cur - Lmean_cur
#Ltestn_cur /= Lstd_cur

# Reset the model's input slot and attach the normalised data sets.
cur.X = None
cur.Y = {'Y': Yn_cur}
cur.Ytestn = {'Ytest': Ytestn_cur}
cur.Ltest = {'Ltest': Ltest_cur}

# Train (or economy-load) this model under a per-label filename and keep it
# for the familiarity sweep below.
fname_cur = fname + '_L' + str(i)
cur.training(model_num_inducing, model_num_iterations,
             model_init_iterations, fname_cur, save_model, economy_save)
mm.append(cur)

ss = []
sstest = []  # NOTE(review): unused in this chunk; may be consumed later in the file -- verify
# Pairwise familiarity: how familiar model i finds model j's training data.
# NOTE(review): `i` here shadows the earlier loop index, and `ss` is rebound
# to a scalar despite the list initialisation above -- kept as in the original.
for i in range(len(Lunique)):
    for j in range(len(Lunique)):
        ss = mm[i].SAMObject.familiarity(mm[j].Y['Y'])
        print('Familiarity of model ' + participantList[i] + ' given label: ' +
              participantList[j] + ' using training data is: ' + str(ss))
    print("")