def test_search():
    # Check whether we get no exceptions...
    I, J = 10, 9
    values_K = [1, 2, 4, 5]
    values_L = [5, 4, 3]
    R = 2 * numpy.ones((I, J))
    R[0, 0] = 1
    M = numpy.ones((I, J))
    priors = {'alpha': 3, 'beta': 4, 'lambdaF': 5, 'lambdaS': 6, 'lambdaG': 7}
    initFG = 'exp'
    initS = 'random'
    iterations = 1

    gridsearch = GridSearch(classifier, values_K, values_L, R, M, priors, initS, initFG, iterations)
    gridsearch.search()
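# Hedged sketch, not part of the original test file: once search() has run, the grid of
# quality measures can be inspected through all_values(), which the experiment scripts
# below suggest returns one value per (K, L) combination. The size assertion is an
# assumption about that return shape; `classifier` and `GridSearch` are assumed to be
# available at module level, as in test_search() above.
def test_all_values():
    I, J = 10, 9
    values_K = [1, 2, 4, 5]
    values_L = [5, 4, 3]
    R = 2 * numpy.ones((I, J))
    R[0, 0] = 1
    M = numpy.ones((I, J))
    priors = {'alpha': 3, 'beta': 4, 'lambdaF': 5, 'lambdaS': 6, 'lambdaG': 7}
    gridsearch = GridSearch(classifier, values_K, values_L, R, M, priors, 'random', 'exp', 1)
    gridsearch.search()
    for metric in ['loglikelihood', 'BIC', 'AIC', 'MSE']:
        values = numpy.array(gridsearch.all_values(metric))
        # Assumed: one metric value per (K, L) pair in the grid
        assert values.size == len(values_K) * len(values_L)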
# Generate data
(_, _, _, _, _, R) = generate_dataset(I, J, true_K, true_L, lambdaF, lambdaS, lambdaG, tau)
M = try_generate_M(I, J, fraction_unknown, attempts_M)

# Run the grid search. The priors lambdaF, lambdaS, lambdaG need to be single values (recall K and L are unknown)
priors = {'alpha': alpha, 'beta': beta, 'lambdaF': lambdaF[0, 0], 'lambdaS': lambdaS[0, 0], 'lambdaG': lambdaG[0, 0]}
grid_search = GridSearch(classifier, values_K, values_L, R, M, priors, initS, initFG, iterations, restarts)
grid_search.search(burn_in, thinning)

# Plot the performances of all four metrics
for metric in ['loglikelihood', 'BIC', 'AIC', 'MSE']:
    # Make three lists of indices X, Y, Z (K, L, metric)
    values = numpy.array(grid_search.all_values(metric)).flatten()
    list_values_K = numpy.array([values_K for l in range(0, len(values_L))]).T.flatten()
    list_values_L = numpy.array([values_L for k in range(0, len(values_K))]).flatten()

    # Set up a regular grid of interpolation points
    Ki, Li = (numpy.linspace(min(list_values_K), max(list_values_K), 100),
              numpy.linspace(min(list_values_L), max(list_values_L), 100))
    Ki, Li = numpy.meshgrid(Ki, Li)
(_, _, _, _, _, R) = generate_dataset(I, J, true_K, true_L, lambdaF, lambdaS, lambdaG, tau)
M = numpy.ones((I, J))
#M = try_generate_M(I,J,fraction_unknown,attempts_M)

# Run the grid search. The priors lambdaF, lambdaS, lambdaG need to be single values (recall K and L are unknown)
priors = {'alpha': alpha, 'beta': beta, 'lambdaF': lambdaF[0, 0] / 10, 'lambdaS': lambdaS[0, 0] / 10, 'lambdaG': lambdaG[0, 0] / 10}
grid_search = GridSearch(classifier, values_K, values_L, R, M, priors, initS, initFG, iterations, restarts)
grid_search.search()

# Plot the performances of all four metrics
metrics = ['loglikelihood', 'BIC', 'AIC', 'MSE']
for metric in metrics:
    # Make three lists of indices X, Y, Z (K, L, metric)
    values = numpy.array(grid_search.all_values(metric)).flatten()
    list_values_K = numpy.array([values_K for l in range(0, len(values_L))]).T.flatten()
    list_values_L = numpy.array([values_L for k in range(0, len(values_K))]).flatten()

    # Set up a regular grid of interpolation points
    Ki, Li = (numpy.linspace(min(list_values_K), max(list_values_K), 100),
              numpy.linspace(min(list_values_L), max(list_values_L), 100))
    Ki, Li = numpy.meshgrid(Ki, Li)
lambdaS = numpy.ones((true_K, true_L))
lambdaG = numpy.ones((J, true_L))

classifier = bnmtf_vb_optimised
initFG = 'kmeans'
initS = 'random'

# Generate data
(_, _, _, _, _, R) = generate_dataset(I, J, true_K, true_L, lambdaF, lambdaS, lambdaG, tau)
M = numpy.ones((I, J))
#M = try_generate_M(I,J,fraction_unknown,attempts_M)

# Run the grid search. The priors lambdaF, lambdaS, lambdaG need to be single values (recall K and L are unknown)
priors = {'alpha': alpha, 'beta': beta, 'lambdaF': lambdaF[0, 0] / 10, 'lambdaS': lambdaS[0, 0] / 10, 'lambdaG': lambdaG[0, 0] / 10}
grid_search = GridSearch(classifier, values_K, values_L, R, M, priors, initS, initFG, iterations, restarts)
grid_search.search()

# Plot the performances of all four metrics
metrics = ['loglikelihood', 'BIC', 'AIC', 'MSE']
for metric in metrics:
    # Make three lists of indices X, Y, Z (K, L, metric)
    values = numpy.array(grid_search.all_values(metric)).flatten()
    list_values_K = numpy.array([values_K for l in range(0, len(values_L))]).T.flatten()
    list_values_L = numpy.array([values_L for k in range(0, len(values_K))]).flatten()

    # Set up a regular grid of interpolation points
    Ki, Li = (numpy.linspace(min(list_values_K), max(list_values_K), 100),
              numpy.linspace(min(list_values_L), max(list_values_L), 100))
    Ki, Li = numpy.meshgrid(Ki, Li)

    # Interpolate
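    # Assumed continuation of the VB script (the original snippet stops at "# Interpolate");
    # it is sketched here to mirror the Gibbs script below: fit a linear RBF through the
    # measured (K, L, metric) points so the surface can be evaluated on the fine mesh.
    rbf = scipy.interpolate.Rbf(list_values_K, list_values_L, values, function='linear')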
lambdaF = numpy.ones((I, true_K))
lambdaS = numpy.ones((true_K, true_L))
lambdaG = numpy.ones((J, true_L))

classifier = bnmtf_gibbs_optimised
initFG = 'kmeans'
initS = 'random'

# Generate data
(_, _, _, _, _, R) = generate_dataset(I, J, true_K, true_L, lambdaF, lambdaS, lambdaG, tau)
M = try_generate_M(I, J, fraction_unknown, attempts_M)

# Run the grid search. The priors lambdaF, lambdaS, lambdaG need to be single values (recall K and L are unknown)
priors = {'alpha': alpha, 'beta': beta, 'lambdaF': lambdaF[0, 0], 'lambdaS': lambdaS[0, 0], 'lambdaG': lambdaG[0, 0]}
grid_search = GridSearch(classifier, values_K, values_L, R, M, priors, initS, initFG, iterations, restarts)
grid_search.search(burn_in, thinning)

# Plot the performances of all four metrics
for metric in ['loglikelihood', 'BIC', 'AIC', 'MSE']:
    # Make three lists of indices X, Y, Z (K, L, metric)
    values = numpy.array(grid_search.all_values(metric)).flatten()
    list_values_K = numpy.array([values_K for l in range(0, len(values_L))]).T.flatten()
    list_values_L = numpy.array([values_L for k in range(0, len(values_K))]).flatten()

    # Set up a regular grid of interpolation points
    Ki, Li = (numpy.linspace(min(list_values_K), max(list_values_K), 100),
              numpy.linspace(min(list_values_L), max(list_values_L), 100))
    Ki, Li = numpy.meshgrid(Ki, Li)

    # Interpolate
    rbf = scipy.interpolate.Rbf(list_values_K, list_values_L, values, function='linear')
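    # Hedged sketch of the assumed plotting step (not part of the original snippet):
    # evaluate the RBF on the fine (Ki, Li) mesh and draw a heatmap of the metric over
    # the (K, L) grid. matplotlib.pyplot is assumed to be imported as plt.
    values_i = rbf(Ki, Li)

    plt.figure()
    plt.imshow(values_i, origin='lower', aspect='auto',
               extent=[min(list_values_K), max(list_values_K),
                       min(list_values_L), max(list_values_L)])
    plt.colorbar()
    plt.xlabel("K")
    plt.ylabel("L")
    plt.title(metric)
    plt.show()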