def losc_test(paramlist, show, val):
    # paramlist: [gamma, n_min, alpha, threshold, learn (GD/LS), learn (gFIMTLS)]
    fimtgd = FIMTGD(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                    threshold=paramlist[3], learn=paramlist[4])
    fimtls = FIMTLS(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                    threshold=paramlist[3], learn=paramlist[4])
    gfimtls = gFIMTLS(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                      threshold=paramlist[3], learn=paramlist[5])
    cumLossgd = [0]
    cumLossls = [0]
    cumLossgls = [0]

    # Generate the Losc data, sort it by the first column and split it into
    # inputs (all columns except the first and last) and targets (last column).
    data = generate_Losc(4000)
    data = np.array(sorted(data, key=lambda x: x[0]))
    o_target = data[:, -1]
    inputs = data[:, 1:-1]

    for counter in range(len(data)):
        # Train on a noisy target, but accumulate the absolute error against the clean one.
        noise = (np.random.uniform() - 0.5) * 0.8
        target = o_target[counter] + noise
        cumLossgd.append(cumLossgd[-1] + np.fabs(o_target[counter] - fimtgd.eval_and_learn(np.array(inputs[counter]), target)))
        cumLossls.append(cumLossls[-1] + np.fabs(o_target[counter] - fimtls.eval_and_learn(np.array(inputs[counter]), target)))
        cumLossgls.append(cumLossgls[-1] + np.fabs(o_target[counter] - gfimtls.eval_and_learn(np.array(inputs[counter]), target)))

    if show:
        f = plt.figure()
        plt.plot(cumLossgd[1:], label="Gradient Descent Loss")
        plt.plot(cumLossls[1:], label="Filter Loss")
        plt.title("CumLoss Ratio:" + str(min(cumLossgd[-1], cumLossls[-1]) / max(cumLossgd[-1], cumLossls[-1])))
        plt.legend()
        figname = ("g" + str(paramlist[0]) + "_nmin" + str(paramlist[1]) + "_al" + str(paramlist[2]) +
                   "_thr" + str(paramlist[3]) + "_lr" + str(paramlist[4]) + ".png")
        plt.savefig(figname)
        plt.close(f)

    return [cumLossgd, cumLossls, cumLossgls, val, paramlist]
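
# --- Hedged usage sketch (not from the original code) ------------------------
# The tests in this file all take `paramlist` as a positional list in the order
# [gamma, n_min, alpha, threshold, learn (GD/LS), learn (gFIMTLS)]. The helper
# name below and the parameter values are illustrative placeholders only; the
# real sweep values are defined elsewhere.
def _example_losc_run():
    params = [0.95, 96, 0.005, 50, 0.01, 0.01]  # placeholder hyperparameters
    cum_gd, cum_ls, cum_gls, val, used_params = losc_test(params, show=False, val=0)
    print("final cumulative losses:", cum_gd[-1], cum_ls[-1], cum_gls[-1])
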
def sine_test(paramlist, show, val):
    # paramlist: [gamma, n_min, alpha, threshold, learn (GD/LS), learn (gFIMTLS)]
    fimtgd = FIMTGD(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                    threshold=paramlist[3], learn=paramlist[4])
    fimtls = FIMTLS(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                    threshold=paramlist[3], learn=paramlist[4])
    gfimtls = gFIMTLS(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                      threshold=paramlist[3], learn=paramlist[5])
    cumLossgd = [0]
    cumLossls = [0]
    cumLossgls = [0]

    x = list()
    y = list()
    for i in range(4000):
        # Sample a sine target on [0, 2*pi]; after 2000 samples the target is
        # shifted by 1.0 to simulate concept drift.
        x_in = np.random.uniform(0.0, 1.0) * 2 * np.pi
        target = np.sin(x_in)
        if i > 2000:
            target += 1.0
        o_target = target
        noise = (np.random.uniform() - 0.5) * 0.8
        target += noise
        x.append(x_in)
        y.append(target)
        # Accumulate the absolute prediction error against the noise-free target.
        cumLossgd.append(cumLossgd[-1] + np.fabs(o_target - fimtgd.eval_and_learn(np.array(x_in), target)))
        cumLossls.append(cumLossls[-1] + np.fabs(o_target - fimtls.eval_and_learn(np.array(x_in), target)))
        cumLossgls.append(cumLossgls[-1] + np.fabs(o_target - gfimtls.eval_and_learn(np.array(x_in), target)))
    # plt.scatter(x=x, y=y); plt.show()  # optional: inspect the noisy stream

    if show:
        f = plt.figure()
        plt.plot(cumLossgd[1:], label="Gradient Descent Loss")
        plt.plot(cumLossls[1:], label="Filter Loss")
        plt.title("CumLoss Ratio:" + str(min(cumLossgd[-1], cumLossls[-1]) / max(cumLossgd[-1], cumLossls[-1])))
        plt.legend()
        figname = ("g" + str(paramlist[0]) + "_nmin" + str(paramlist[1]) + "_al" + str(paramlist[2]) +
                   "_thr" + str(paramlist[3]) + "_lr" + str(paramlist[4]) + ".png")
        plt.savefig(figname)
        plt.close(f)

    return [cumLossgd, cumLossls, cumLossgls, val, paramlist]
def Kiel_Test(paramlist, show, val):
    # paramlist: [gamma, n_min, alpha, threshold, learn (GD/LS), learn (gFIMTLS)]
    fimtgd = FIMTGD(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                    threshold=paramlist[3], learn=paramlist[4])
    fimtls = FIMTLS(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                    threshold=paramlist[3], learn=paramlist[4])
    gfimtls = gFIMTLS(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                      threshold=paramlist[3], learn=paramlist[5])
    cumLossgd = [0]
    cumLossls = [0]
    cumLossgls = [0]

    data = get_Kiel_data()
    n_samples = 100000
    warm_start = -1  # samples with index <= warm_start only train, no loss recorded
    for i in range(n_samples):
        print(str(i + 1) + '/' + str(n_samples))
        # Columns 8:10 are the input features, column 10 is the target.
        x_in = data[i][8:10]
        target = data[i][10]
        if i > warm_start:
            cumLossgd.append(cumLossgd[-1] + np.fabs(target - fimtgd.eval_and_learn(np.array(x_in), target)))
            cumLossls.append(cumLossls[-1] + np.fabs(target - fimtls.eval_and_learn(np.array(x_in), target)))
            cumLossgls.append(cumLossgls[-1] + np.fabs(target - gfimtls.eval_and_learn(np.array(x_in), target)))
        else:
            # Warm start: let the models learn without accumulating loss.
            fimtgd.eval_and_learn(np.array(x_in), target)
            fimtls.eval_and_learn(np.array(x_in), target)
            gfimtls.eval_and_learn(np.array(x_in), target)

    if show:
        f = plt.figure()
        plt.plot(cumLossgd[1:], label="Gradient Descent Loss")
        plt.plot(cumLossls[1:], label="Filter Loss")
        plt.title("CumLoss Ratio:" + str(min(cumLossgd[-1], cumLossls[-1]) / max(cumLossgd[-1], cumLossls[-1])))
        plt.legend()
        figname = ("g" + str(paramlist[0]) + "_nmin" + str(paramlist[1]) + "_al" + str(paramlist[2]) +
                   "_thr" + str(paramlist[3]) + "_lr" + str(paramlist[4]) + ".png")
        plt.savefig(figname)
        plt.close(f)

    return [cumLossgd, cumLossls, cumLossgls, val, paramlist]
def abalone_test(paramlist, show, val):
    # paramlist: [gamma, n_min, alpha, threshold, learn (GD/LS), learn (gFIMTLS)]
    fimtgd = FIMTGD(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                    threshold=paramlist[3], learn=paramlist[4])
    fimtls = FIMTLS(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                    threshold=paramlist[3], learn=paramlist[4])
    gfimtls = gFIMTLS(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                      threshold=paramlist[3], learn=paramlist[5])
    cumLossgd = [0]
    cumLossls = [0]
    cumLossgls = [0]

    # Encode the categorical sex attribute numerically; the remaining columns
    # are numeric features and the last column is the regression target.
    gender_code = {"M": 1.0, "I": 0.5, "F": 0.0}
    with open("abalone.data", 'rt') as abalonefile:
        for row in abalonefile:
            row = row.rstrip().split(',')
            target = float(row[-1])
            x_in = [gender_code[row[0]]] + [float(item) for item in row[1:-1]]
            cumLossgd.append(cumLossgd[-1] + np.fabs(target - fimtgd.eval_and_learn(np.array(x_in), target)))
            cumLossls.append(cumLossls[-1] + np.fabs(target - fimtls.eval_and_learn(np.array(x_in), target)))
            cumLossgls.append(cumLossgls[-1] + np.fabs(target - gfimtls.eval_and_learn(np.array(x_in), target)))

    if show:
        f = plt.figure()
        plt.plot(cumLossgd[1:], label="Gradient Descent Loss")
        plt.plot(cumLossls[1:], label="Filter Loss")
        plt.title("CumLoss Ratio:" + str(min(cumLossgd[-1], cumLossls[-1]) / max(cumLossgd[-1], cumLossls[-1])))
        plt.legend()
        figname = ("g" + str(paramlist[0]) + "_nmin" + str(paramlist[1]) + "_al" + str(paramlist[2]) +
                   "_thr" + str(paramlist[3]) + "_lr" + str(paramlist[4]) + ".png")
        plt.savefig(figname)
        plt.close(f)

    return [cumLossgd, cumLossls, cumLossgls, val, paramlist]
def legendre_test(paramlist, show, val):
    # paramlist: [gamma, n_min, alpha, threshold, learn (GD/LS), learn (gFIMTLS)]
    fimtgd = FIMTGD(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                    threshold=paramlist[3], learn=paramlist[4])
    fimtls = FIMTLS(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                    threshold=paramlist[3], learn=paramlist[4])
    gfimtls = gFIMTLS(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                      threshold=paramlist[3], learn=paramlist[5])
    cumLossgd = [0]
    cumLossls = [0]
    cumLossgls = [0]

    # data_provider yields (input, noisy target, original target) triples; the
    # absolute error is accumulated against the original target o_target.
    for x_in, target, o_target in data_provider([9, 9, 32, 32, 4],
                                                [0.05, 0.05, 0.05, 0.05, 0.05],
                                                [1000, 1000, 3000, 2000, 2000], 5):
        cumLossgd.append(cumLossgd[-1] + np.fabs(o_target - fimtgd.eval_and_learn(np.array(x_in), target)))
        cumLossls.append(cumLossls[-1] + np.fabs(o_target - fimtls.eval_and_learn(np.array(x_in), target)))
        cumLossgls.append(cumLossgls[-1] + np.fabs(o_target - gfimtls.eval_and_learn(np.array(x_in), target)))

    if show:
        f = plt.figure()
        plt.plot(cumLossgd[1:], label="Gradient Descent Loss")
        plt.plot(cumLossls[1:], label="Filter Loss")
        plt.title("CumLoss Ratio:" + str(min(cumLossgd[-1], cumLossls[-1]) / max(cumLossgd[-1], cumLossls[-1])))
        plt.legend()
        figname = ("g" + str(paramlist[0]) + "_nmin" + str(paramlist[1]) + "_al" + str(paramlist[2]) +
                   "_thr" + str(paramlist[3]) + "_lr" + str(paramlist[4]) + ".png")
        plt.savefig(figname)
        plt.close(f)

    return [cumLossgd, cumLossls, cumLossgls, val, paramlist]
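
# --- Hedged driver sketch (hypothetical, not part of the original module) ----
# All tests share the signature (paramlist, show, val) and return
# [cumLossgd, cumLossls, cumLossgls, val, paramlist], which suggests they are
# meant to be run over a grid of hyperparameters. `run_param_sweep` and the
# sorting criterion below are illustrative assumptions only.
def run_param_sweep(test_fn, param_grid, show=False):
    results = []
    for val, params in enumerate(param_grid):
        results.append(test_fn(params, show, val))
    # Sort runs by the final filter loss (index 1 holds cumLossls).
    results.sort(key=lambda r: r[1][-1])
    return results
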
def test2d(paramlist, show, val):
    # paramlist: [gamma, n_min, alpha, threshold, learn (GD/LS), learn (gFIMTLS)]
    fimtgd = FIMTGD(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                    threshold=paramlist[3], learn=paramlist[4])
    fimtls = FIMTLS(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                    threshold=paramlist[3], learn=paramlist[4])
    gfimtls = gFIMTLS(gamma=paramlist[0], n_min=paramlist[1], alpha=paramlist[2],
                      threshold=paramlist[3], learn=paramlist[5])
    cumLossgd = [0]
    cumLossls = [0]
    cumLossgls = [0]

    # Build a 2-D regression problem from matplotlib's 3-D test surface.
    X = list()
    Y = list()
    x, y, z = axes3d.get_test_data(0.1)
    for i in range(len(x)):
        for j in range(len(y)):
            X.append([x[i, j], y[i, j]])
            Y.append(z[i, j])
    num_d = len(X)

    # Shuffle inputs and targets with a single permutation so that each
    # (input, target) pair stays aligned.
    X = np.array(X)
    Y = np.array(Y)
    perm = np.random.permutation(num_d)
    X, Y = X[perm], Y[perm]

    for i in range(num_d):
        x_in = X[i]
        o_target = Y[i]
        target = o_target + (np.random.uniform() - 0.5) * 0.2
        # After half of the stream the surface is shifted by 1.0 (concept drift).
        if num_d / 2 < i:
            target += 1.0
            o_target += 1.0
        cumLossgd.append(cumLossgd[-1] + np.fabs(o_target - fimtgd.eval_and_learn(np.array(x_in), target)))
        cumLossls.append(cumLossls[-1] + np.fabs(o_target - fimtls.eval_and_learn(np.array(x_in), target)))
        cumLossgls.append(cumLossgls[-1] + np.fabs(o_target - gfimtls.eval_and_learn(np.array(x_in), target)))

    if show:
        f = plt.figure()
        plt.plot(cumLossgd[1:], label="Gradient Descent Loss")
        plt.plot(cumLossls[1:], label="Filter Loss")
        plt.title("CumLoss Ratio:" + str(min(cumLossgd[-1], cumLossls[-1]) / max(cumLossgd[-1], cumLossls[-1])))
        plt.legend()
        figname = ("g" + str(paramlist[0]) + "_nmin" + str(paramlist[1]) + "_al" + str(paramlist[2]) +
                   "_thr" + str(paramlist[3]) + "_lr" + str(paramlist[4]) + ".png")
        plt.savefig(figname)
        plt.close(f)

    return [cumLossgd, cumLossls, cumLossgls, val, paramlist]
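
# --- Hedged refactor sketch (not part of the original module) ----------------
# Every test above repeats the same `if show:` plotting block. A shared helper
# such as this one could replace those blocks; `plot_cumloss` is a hypothetical
# name and uses only the matplotlib calls already present in the tests.
def plot_cumloss(cumLossgd, cumLossls, paramlist):
    f = plt.figure()
    plt.plot(cumLossgd[1:], label="Gradient Descent Loss")
    plt.plot(cumLossls[1:], label="Filter Loss")
    plt.title("CumLoss Ratio:" + str(min(cumLossgd[-1], cumLossls[-1]) /
                                     max(cumLossgd[-1], cumLossls[-1])))
    plt.legend()
    figname = ("g" + str(paramlist[0]) + "_nmin" + str(paramlist[1]) +
               "_al" + str(paramlist[2]) + "_thr" + str(paramlist[3]) +
               "_lr" + str(paramlist[4]) + ".png")
    plt.savefig(figname)
    plt.close(f)
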