Example #1
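This example runs a grid search over the numbers of row and column factors K and L for non-probabilistic NMTF: itertools.product enumerates every (K, L) pair, and MatrixCrossValidation evaluates each candidate with 5-fold cross-validation on the Sanger dataset, writing the performances to results.txt.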
import itertools, random, numpy
# Project-specific imports: NMTF, MatrixCrossValidation and load_Sanger come from
# the surrounding project; their module paths depend on the repository layout.

standardised = False  # standardised Sanger dataset or unstandardised

# Training configuration passed to each NMTF run
train_config = {
    'iterations' : 3000,
    'init_FG' : 'kmeans',
    'init_S' : 'exponential',
    'expo_prior' : 0.1
}
K_range = [2,4,6,8,10]
L_range = [2,4,6,8,10]
P = 5
no_folds = 5
output_file = "./results.txt"
files_nested_performances = ["./fold_%s.txt" % fold for fold in range(1,no_folds+1)]

# Construct the parameter search
parameter_search = [{'K':K,'L':L} for (K,L) in itertools.product(K_range,L_range)]

# Load in the Sanger dataset
(_,X_min,M,_,_,_,_) = load_Sanger(standardised=standardised)

# Run the cross-validation framework
random.seed(42)
numpy.random.seed(9000)
nested_crossval = MatrixCrossValidation(
    method=NMTF,
    X=X_min,
    M=M,
    K=no_folds,   # number of cross-validation folds (not the factorisation K)
    parameter_search=parameter_search,
    train_config=train_config,
    file_performance=output_file
)
nested_crossval.run()
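Note that files_nested_performances is constructed above but not passed into MatrixCrossValidation; presumably it names one output file per fold for a nested variant of this cross-validation.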
Example #2
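This example searches the (K, L) space greedily rather than exhaustively: GreedySearchCrossValidation cross-validates the variational-Bayes NMTF model bnmtf_vb_optimised, with its hyperparameters (alpha, beta, lambdaF, lambdaS, lambdaG) collected in the priors dict.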
import random, numpy
# Project-specific imports: bnmtf_vb_optimised, GreedySearchCrossValidation and
# load_Sanger come from the surrounding project; their module paths depend on
# the repository layout.

standardised = False  # standardised Sanger dataset or unstandardised

# Search and training settings; the values below are assumptions, mirroring Example #1
K_range = [2,4,6,8,10]
L_range = [2,4,6,8,10]
no_folds = 5
iterations = 1000
init_S = 'exponential'
init_FG = 'kmeans'
restarts = 5

output_file = "./results_TEST.txt"

alpha, beta = 1., 1.
lambdaF = 1. / 10.
lambdaS = 1. / 10.
lambdaG = 1. / 10.
priors = {
    'alpha': alpha,
    'beta': beta,
    'lambdaF': lambdaF,
    'lambdaS': lambdaS,
    'lambdaG': lambdaG
}

# Load in the Sanger dataset
(_, X_min, M, _, _, _, _) = load_Sanger(standardised=standardised)

# Run the cross-validation framework
random.seed(42)
numpy.random.seed(9000)
nested_crossval = GreedySearchCrossValidation(classifier=bnmtf_vb_optimised,
                                              R=X_min,
                                              M=M,
                                              values_K=K_range,
                                              values_L=L_range,
                                              folds=no_folds,
                                              priors=priors,
                                              init_S=init_S,
                                              init_FG=init_FG,
                                              iterations=iterations,
                                              restarts=restarts,
                                              file_performance=output_file)  # assumed final argument, wiring up the output file defined above
nested_crossval.run()
Example #3
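This example benchmarks the Gibbs-sampling BNMF model on the Sanger dataset: with an I x J = 622 x 139 matrix and K = 25 factors, the model is trained 10 times for 500 iterations each, recording per-iteration timestamps and performances for every run.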
import numpy
# Project-specific imports: bnmf_gibbs_optimised and load_Sanger come from the
# surrounding project; their module paths depend on the repository layout.

standardised = False  # standardised Sanger dataset or unstandardised

repeats = 10

iterations = 500
init_UV = 'random'
I, J, K = 622,139,25

alpha, beta = 1., 1.  # hyperparameters of the Gamma prior over the noise precision
lambdaU = numpy.ones((I,K)) / 10.  # exponential prior rates for U
lambdaV = numpy.ones((J,K)) / 10.  # exponential prior rates for V
priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV }

# Load in data
(_,R,M,_,_,_,_) = load_Sanger(standardised=standardised)


# Run the VB algorithm, <repeats> times
times_repeats = []
performances_repeats = []
for i in range(0,repeats):
    # Fix the seed so every repeat runs identically (the repeats gauge runtime variability)
    numpy.random.seed(0)
    
    # Run the classifier
    BNMF = bnmf_gibbs_optimised(R,M,K,priors) 
    BNMF.initialise(init_UV)
    BNMF.run(iterations)

    # Extract the performances and timestamps across all iterations
    # (all_times / all_performances are assumed attributes, mirroring Example #4)
    times_repeats.append(BNMF.all_times)
    performances_repeats.append(BNMF.all_performances)
Example #4
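This example repeats the benchmark with non-probabilistic NMF: the factor matrices are initialised from an exponential distribution with parameter expo_prior = 0.1, and each of the 10 runs is trained for 1000 iterations while per-iteration times are recorded.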
import numpy, random, scipy, matplotlib.pyplot as plt
# Project-specific imports: NMF and load_Sanger come from the surrounding project.

##########

standardised = False  # standardised Sanger dataset or unstandardised

repeats = 10

iterations = 1000
I, J, K = 622, 139, 25

init_UV = 'exponential'
expo_prior = 1 / 10.

# Load in data
(_, R, M, _, _, _, _) = load_Sanger(standardised=standardised)

# Run the VB algorithm, <repeats> times
times_repeats = []
performances_repeats = []
for i in range(0, repeats):
    # Fix the seed so every repeat runs identically (the repeats gauge runtime variability)
    numpy.random.seed(0)

    # Run the classifier
    nmf = NMF(R, M, K)
    nmf.initialise(init_UV, expo_prior)
    nmf.run(iterations)

    # Extract the performances and timestamps across all iterations
    times_repeats.append(nmf.all_times)
    performances_repeats.append(nmf.all_performances)  # all_performances is an assumed attribute, analogous to all_times
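With the repeats collected, the per-iteration results can be averaged and plotted using the matplotlib import above. The sketch below is a minimal example under the assumption that each entry of performances_repeats is a dict mapping a metric name such as 'MSE' to a list of per-iteration values; that key is not confirmed by the snippets here.

# Average times and MSE across repeats, then plot MSE against wall-clock time.
# ('MSE' as a key of all_performances is an assumption about the model's API.)
mean_times = numpy.mean(times_repeats, axis=0)
mean_mse = numpy.mean([perf['MSE'] for perf in performances_repeats], axis=0)
plt.plot(mean_times, mean_mse)
plt.xlabel("Time (seconds)")
plt.ylabel("MSE")
plt.show()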