Code example #1
 def test_FullMTL(self):
     metrics = ['rmse']
     dataset = SD.SyntheticDataCreator(num_tasks=3,
                                        cellsPerTask=500,
                                        drugsPerTask=10,
                                        function="gauss",
                                        normalize=False,
                                        noise=1,
                                        graph=False,
                                        test_split=0.3)
     dataset.prepare_data()
     exp_folder = "fullMtlGP_test"
     exp = ModelTraining(exp_folder)
     methods = [
         MtlGP.GPyFullMTL(num_iters=50,
                          length_scale=20,
                          noise_covar=.9,
                          n_inducing_points=500,
                          num_tasks=3)
     ]
     exp.execute(dataset, methods, metrics, nruns=1)
     df = exp.getResultsWrapper()
     rmses = df['Value'].values
     for rmse in rmses:
         assert rmse < 1.5  # arbitrary threshold for reasonable prediction
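Most of the test snippets that follow refer to dataset and metrics without defining them locally; in the project they are presumably prepared by a shared fixture. The sketch below is a minimal, hypothetical version of such a fixture, reusing the constructor arguments from the test above; the SD import path and the module-level placement are assumptions, not taken from the snippets.

# Hypothetical shared fixture for the tests in this section; the original
# project may build these in a setUp() method rather than at module scope.
import datasets.SyntheticData as SD  # import path is an assumption

metrics = ['rmse']  # metric names passed to ModelTraining.execute()
dataset = SD.SyntheticDataCreator(num_tasks=3, cellsPerTask=500, drugsPerTask=10,
                                  function="gauss", normalize=False, noise=1,
                                  graph=False, test_split=0.3)
dataset.prepare_data()  # split and preprocess before any method is trained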
Code example #2
 def test_NonNegative_MF(self):
     exp_folder = "svdnnmf_test"
     exp = ModelTraining(exp_folder)
     methods = [NonNegative_MF(n_factors=30)]
     exp.execute(dataset, methods, metrics, nruns=1)
     df = exp.getResultsWrapper()
     rmses = df['Value'].values
     for rmse in rmses:
         assert rmse < 1.5  # arbitrary threshold for reasonable prediction
Code example #3
 def test_KNNBasic(self):
     exp_folder = "knnbasic_test"
     exp = ModelTraining(exp_folder)
     methods = [KNN_Basic(k=10)]
     exp.execute(dataset, methods, metrics, nruns=1)
     df = exp.getResultsWrapper()
     rmses = df['Value'].values
     for rmse in rmses:
         assert rmse < 1.5  # arbitrary threshold for reasonable prediction
Code example #4
 def test_HadamardGP(self):
     exp_folder = "HadamardGP_test"
     exp = ModelTraining(exp_folder)
     methods = [MtlGP.HadamardMTL(num_iters=50, length_scale=20, noise_covar=.9,
                                  n_inducing_points=500, composite=False,
                                  learning_rate=.1, validate=False)]
     exp.execute(dataset, methods, metrics, nruns=1)
     df = exp.getResultsWrapper()
     rmses = df['Value'].values
     for rmse in rmses:
         assert rmse < 1.5  # arbitrary threshold for reasonable prediction
Code example #5
 def test_ExactGP(self):
     exp_folder = "exactGP_test"
     exp = ModelTraining(exp_folder)
     methods = [
         ExactGPRegression(num_iters=10, length_scale=50, noise_covar=1.5)
     ]
     exp.execute(dataset, methods, metrics, nruns=1)
     df = exp.getResultsWrapper()
     rmses = df['Value'].values
     for rmse in rmses:
         assert rmse < 1.5  # arbitrary threshold for reasonable prediction
Code example #6
 def test_NCF_MTL_MF(self):
     hyperparams_mtlmf = {'batch_size': 64, 'epochs': 150, 'layers': '[64,32,16,8]', \
            'learner': 'adam', 'lr': .001,'mf_lr': .001, 'num_factors': 10, \
            'reg_layers': '[0,0,0,.01]', 'reg_mf': 0.01, 'verbose': 1}
     methods = [
         NCF_MTL.Neural_Collaborative_Filtering_FeaturesMTLMF(
             hyperparams_mtlmf, 'NCF_MTL_MF', 'feature_based')
     ]
     exp_folder = "ncf_test_MF"
     exp = ModelTraining(exp_folder)
     exp.execute(dataset, methods, metrics, nruns=1)
     df = exp.getResultsWrapper()
     rmses = df['Value'].values
     for rmse in rmses:
         assert rmse < 2  # arbitrary threshold for reasonable prediction
Code example #7
 def test_NCF(self):
     hyperparams = {'batch_size': 32, 'epochs': 200, 'layers': '[64,32,16,8]', \
                'learner': 'rmsprop', 'lr': 0.001, 'num_factors': 8, 'num_neg': 4, \
                'reg_layers': '[0,0,0,0]', 'reg_mf': 0.0, 'verbose': 1, 'warm_start':False}
     exp_folder = "ncf_test"
     exp = ModelTraining(exp_folder)
     methods = [
         Neural_Collaborative_Filtering(hyperparams,
                                        'Ratings matrix NCF',
                                        'non_feature_based',
                                        warm_start=False)
     ]
     exp.execute(dataset, methods, metrics, nruns=1)
     df = exp.getResultsWrapper()
     rmses = df['Value'].values
     for rmse in rmses:
         assert rmse < 2  # arbitrary threshold for reasonable prediction
Code example #8
 def test_SparseCompGP(self):
     exp_folder = "sparseCompGP_test"
     exp = ModelTraining(exp_folder)
     methods = [
         SparseGPCompositeKernelRegression(num_iters=15,
                                           learning_rate=1e-1,
                                           noise_covar=1.0,
                                           length_scale_cell=30.0,
                                           output_scale_cell=1.0,
                                           length_scale_drug=30.0,
                                           output_scale_drug=1.0)
     ]
     exp.execute(dataset, methods, metrics, nruns=1)
     df = exp.getResultsWrapper()
     rmses = df['Value'].values
     for rmse in rmses:
         assert rmse < 1.5  # arbitrary threshold for reasonable prediction
Code example #9
 def test_NN(self):
     dataset = SD.SyntheticDataCreator(num_tasks=3,
                                        cellsPerTask=400,
                                        drugsPerTask=10,
                                        function="cosine",
                                        normalize=True,
                                        noise=1,
                                        graph=False,
                                        test_split=0.3)
     dataset.prepare_data()
     exp_folder = "NN_test"
     exp = ModelTraining(exp_folder)
     methods = [FeedForwardNN([25, 25], 'relu', epochs=60, lr=1e-3)]
     exp.execute(dataset, methods, metrics, nruns=1)
     df = exp.getResultsWrapper()
     rmses = df['Value'].values
     for rmse in rmses:
         assert rmse < 1.5  # arbitrary threshold for reasonable prediction
Code example #10
 def test_NCFFeat(self):
     exp_folder = "ncfFeat_test"
     exp = ModelTraining(exp_folder)
     hyperparams_feats = {'batch_size': 64, 'epochs': 150, 'layers': '[64,32,16,8]', \
                'learner': 'adam', 'lr': 0.001, 'mf_pretrain': '', 'mlp_pretrain': '', \
                'num_factors': 8, 'num_neg': 4, 'out': 1, 'path': 'Data/', \
                'reg_layers': '[0,0,0,0]', 'reg_mf': 0, 'verbose': 1}
     methods = [
         Neural_Collaborative_Filtering_Features(
             hyperparams_feats,
             'Neural Collaborative Filtering',
             'feature_based',
             warm_start=True)
     ]
     exp.execute(dataset, methods, metrics, nruns=1)
     df = exp.getResultsWrapper()
     rmses = df['Value'].values
     for rmse in rmses:
         assert rmse < 2  # arbitrary threshold for reasonable prediction
Code example #11

    methods = [MtlGP.HadamardMTL(num_iters=150, length_scale=57, noise_covar=.24,
                                 n_inducing_points=1000, composite=False,
                                 learning_rate=.07, validate=False, bias=False, stabilize=True),
               MtlGP.GPyFullMTL(num_iters=72, length_scale=58.828, noise_covar=0.31587,
                                n_inducing_points=500, num_tasks=3, learning_rate=0.02729),
               NCF_MTL.Neural_Collaborative_Filtering_FeaturesMTLMLP(hyperparams_mtlmlp, 'MTL NCF MLP', 'feature_based'),
               NCF_MTL.Neural_Collaborative_Filtering_FeaturesMTLMF(hyperparams_mtlmf, 'NCF_MTL_MF', 'feature_based'),
               SVD_MF(n_factors=10),
               KNN_Normalized(k=10)
              ]

    metrics = ['rmse','mae']

    exp_folder = __file__[:-3]  # drop the '.py' extension; str.strip('.py') would also trim stray 'p'/'y' characters
    exp = ModelTraining(exp_folder)
    exp.execute(dataset, methods, metrics, nruns=1)  # increase nruns for a more accurate error estimate
    exp.generate_report()

    
    
"""

frozen hyperparams; sometimes they perform worse


    hyperparams_mtlmf = {'batch_size': 64, 'epochs': 227, 'layers': '[64,32,16,8]', \
                   'learner': 'sgd', 'lr': 1.00293510662245e-05,'mf_lr': 0.000111324, 'num_factors': 100, \
                   'reg_layers': '[0,0,0,.01]', 'reg_mf':  0.009970084324087263, 'verbose': 1}
    hyperparams_mtlmlp = {'batch_size': 64, 'epochs': 238, 'layers': '[64,32,16,8]', \
                   'learner': 'sgd', 'lr': 0.00042715,'mlp_lr': .001, 'num_factors': 84, \
                   'reg_layers': '[0,0,0,.01]', 'reg_mf':0.0028382, 'verbose': 1}
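Note that the methods list in this fragment consumes hyperparams_mtlmf and hyperparams_mtlmlp, while the frozen dictionaries above sit inside a trailing string block. To actually run with the frozen settings, the dictionaries would have to be assigned before the list is built; a minimal sketch, with values copied from the block above:

# define the frozen settings before the methods list that references them
hyperparams_mtlmf = {'batch_size': 64, 'epochs': 227, 'layers': '[64,32,16,8]',
                     'learner': 'sgd', 'lr': 1.00293510662245e-05, 'mf_lr': 0.000111324,
                     'num_factors': 100, 'reg_layers': '[0,0,0,.01]',
                     'reg_mf': 0.009970084324087263, 'verbose': 1}

methods = [NCF_MTL.Neural_Collaborative_Filtering_FeaturesMTLMF(
    hyperparams_mtlmf, 'NCF_MTL_MF', 'feature_based')]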
Code example #12
if __name__ == '__main__':

    # set up the task-specific datasets here
    nb_tasks = 10
    nb_samples = 200
    dimension = 30
    dataset = ArtificialClassificationDatasetMTL(nb_tasks,
                                                 nb_samples,
                                                 dimension)
    dataset.prepare_data()

    # list of methods to compare; the same method can appear several
    # times in the list, each time with different hyper-parameter values
    methods = [
        DNNClassifier(name='DNN-STL'),
        PooledRandomForestClassifier(n_estimators=100, name='Pooled-RF'),
        DNNClassifierPooled(batch_size=32, name='DNN Pooled'),
        DNNClassifierMTL(batch_size=32, name='DNN MTL'),
    ]

    # list of metrics used to measure each method's performance;
    # see the available metrics in utils/performance_metrics.py
    metrics = ['accuracy']

    exp_folder = __file__[:-3]  # drop the '.py' extension
    exp = ModelTraining(exp_folder)
    exp.execute(dataset, methods, metrics, nb_runs=3)
    exp.generate_report()
Code example #13
from design import ModelTraining
from methods.mtl.MF_MTL import MF_MTL
from methods.matrix_factorization.MF_STL import MF_STL

# from methods.regressor.FFNN import FeedForwardNN
from methods.matrix_factorization.MF import SVD_MF, NonNegative_MF
from methods.knn.KNN import KNN_Normalized
from datasets.DrugCellLines import DrugCellLinesMTL

if __name__ == '__main__':

    drug_transform = {'type': 'pca', 'num_comp': 10}
    cell_transform = {'type': 'pca', 'num_comp': 10}
    dataset = DrugCellLinesMTL(['CCLE', 'GDSC', 'CTRP', 'NCI60'],
                               common=True,
                               unseen_cells=False,
                               normalize=True,
                               test_split=0.2,
                               drug_transform=drug_transform,
                               cell_transform=cell_transform)
    dataset.prepare_data()

    methods = [SVD_MF(n_factors=100), KNN_Normalized(k=10)]

    metrics = ['rmse', 'explained_variance_score', 'mae']

    exp_folder = __file__[:-3]  # drop the '.py' extension
    exp = ModelTraining(exp_folder)
    exp.execute(dataset, methods, metrics, nruns=1)
    exp.generate_report()
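Besides generate_report(), the per-run scores can be pulled back programmatically, as the unit tests earlier on this page do. A minimal sketch, assuming only the 'Value' column that those tests rely on; any additional columns for filtering by metric or method are not shown because their names are not given in the snippets.

scores_df = exp.getResultsWrapper()   # results come back as a pandas DataFrame
scores = scores_df['Value'].values    # 'Value' column as used in the unit tests above
for score in scores:
    print(score)                      # or assert score < some_threshold, as the tests do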
Code example #14
File: singletask_exp.py  Project: LLNL/MTLRecSys
    hyperparams_feats = {'batch_size': 64, 'epochs': 150, 'layers': '[64,32,16,8]', \
                   'learner': 'adam', 'lr': 0.001, 'mf_pretrain': '', 'mlp_pretrain': '', \
                   'num_factors': 10, 'num_neg': 4, 'out': 1, 'path': 'Data/', \
                   'reg_layers': '[0,0,0,0]', 'reg_mf': 0, 'verbose': 1}

    hyperparams = {'batch_size': 32, 'epochs': 300, 'layers': '[64,32,16,8]', \
                   'learner': 'adam', 'lr': .001, 'mf_pretrain': '', 'mlp_pretrain': '', \
                   'num_factors': 8, 'num_neg': 4, 'out': 1, 'path': 'Data/', \
                   'reg_layers': '[0,0,0,.01]', 'reg_mf': 0.01, 'verbose': 1}


    methods = [
        Neural_Collaborative_Filtering_Features(hyperparams_feats, 'NCF', 'feature_based', warm_start=False),
        KNN_Normalized(k=10),
        FeedForwardNN([25, 25], 'relu', epochs=60, lr=1e-3),
        SVD_MF(n_factors=10),
        SparseGPRegression(num_iters=57, length_scale=28.99850556026648, noise_covar=0.880495306431355,
                           n_inducing_points=500, learning_rate=0.08750861518081232,
                           output_scale=0.2726750961954937),
        SparseGPCompositeKernelRegression(num_iters=55, length_scale_cell=23.909358694255733,
                                          length_scale_drug=25.35428771496125,
                                          output_scale_cell=0.23155460333191216,
                                          output_scale_drug=2.3750260726401704,
                                          noise_covar=2, n_inducing_points=500,
                                          learning_rate=0.009494776750100815),
        # ExactGPRegression(num_iters=10, length_scale=50, noise_covar=1.5),  # omitted: very slow
        Neural_Collaborative_Filtering(hyperparams, 'Ratings matrix NCF', 'non_feature_based', warm_start=False)
    ]
    metrics = ['rmse', 'mae']

    exp_folder = __file__[:-3]  # drop the '.py' extension
    exp = ModelTraining(exp_folder)
    exp.execute(dataset, methods, metrics, nruns=1)  # delete after testing
    exp.generate_report()