Example #1
 def test_scheduler(self):
     """Fit GRAM with a default and a customized ReduceOnWorsening scheduler.

     Exercises the scheduler hook twice: once with default settings and once
     with an explicit multiplier / minimum learning rate.
     """
     for sched in (
             ReduceOnWorsening(),
             ReduceOnWorsening(multiplier=.6, min_lr=1e-4),
     ):
         clf = algorithms.GRAM(
             max_iter=10,
             learning_rate=.01,
             scheduler=sched,
         ).fit(self.KLtr, self.Ytr)
Example #2
from MKLpy.utils.misc import identity_kernel
import torch

# Build 10 homogeneous polynomial kernels (degrees 1..10) for training,
# plus the identity kernel. The identity makes GRAM's initial solution
# easily separable; if the initial solution is not separable, GRAM may
# not work well.
degrees = range(1, 11)
KLtr = [pairwise.homogeneous_polynomial_kernel(Xtr, degree=d) for d in degrees]
KLtr = KLtr + [identity_kernel(len(Ytr))]

# Matching test kernel list: one polynomial kernel per degree, plus a
# zero matrix standing in for the identity kernel's train/test block
# (presumably train and test samples are disjoint — verify against data split).
KLte = [pairwise.homogeneous_polynomial_kernel(Xte, Xtr, degree=d) for d in degrees]
KLte = KLte + [torch.zeros(KLte[0].size())]


from MKLpy.algorithms import GRAM
from MKLpy.scheduler import ReduceOnWorsening
from MKLpy.callbacks import EarlyStopping

# Monitor ROC AUC on the held-out kernels; stop after `patience`
# non-improving iterations, checking every `cooldown` iterations.
earlystop = EarlyStopping(
    KLte,
    Yte,
    patience=100,
    cooldown=1,
    metric='roc_auc',
)
# Learning-rate scheduler with default settings — reduces the rate when
# the objective worsens (exact defaults: see ReduceOnWorsening docs).
scheduler = ReduceOnWorsening()

clf = GRAM(
    max_iter=100,
    learning_rate=.1,
    callbacks=[earlystop],
    scheduler=scheduler,
).fit(KLtr, Ytr)