'dropout': (0.1, 1), 'lambda1': (0, 0.5), 'lambda2': (0, 0.5), 'c': (0, 6), 
		'SPDhits': (0.499999, 0.500001), 'rho': (0.35, 0.99), 'epsilon': (1e-9, 1e-1)}
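# Note: the SPDhits range is nearly degenerate, which effectively fixes that parameter at 0.5.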

# Collect each parameter's lower and upper bound so they can be passed to explore().
lowerAndUpperBounds = {key: [value[0], value[1]] for key, value in bounds.iteritems()}

# Training configuration shared by both optimizer tests: 4-fold cross-validation on
# 67553 examples, roughly nine minibatches per epoch, and a small epoch budget
# derived from the patience setting.
rng = np.random.RandomState(123)
kFold = 4
trainSetSize = 67553 * (1 - (1./kFold))
batchSize = int(trainSetSize / 9.)
patience = 2
numEpochs = int((patience / 9.) * 15)
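
# Run the Bayesian search over the adaDelta training configuration in test mode:
# 2 initialization points followed by a single optimization iteration.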
BayesianOpt = BayesianOptimization(bounds, rng = rng, optAlg = "adaDelta", batchSize = batchSize, kFold = kFold,
		numEpochs = numEpochs, validationFrequency = 1, patience = patience, visualize = True, test = True)
#BayesianOpt.initialize(testRecoverDict)
#BayesianOpt.explore(lowerAndUpperBounds)
BayesianOpt.minimize(numInitPoints = 2, numIter = 1)

# RMSprop test
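# Bounds for the RMSprop run; momentum and learningRate are searched here as well.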

bounds = {'numLayers': (8, 40), 'layerSize': (200, 500), 'firstLayerDropout': (0.6, 1), 
		'dropout': (0.1, 1), 'lambda1': (0, 0.5), 'lambda2': (0, 0.5), 'c': (0, 6), 
		'SPDhits': (0.499999, 0.500001), 'rho': (0.35, 0.99), 'momentum': (0.35, 0.99), 'learningRate': (0.0001, 1), 'epsilon': (1e-9, 1e-1)}

lowerAndUpperBounds = {key: [value[0], value[1]] for key, value in bounds.iteritems()}

# Re-seed the RNG, seed the optimizer by exploring the lower and upper bound values,
# then run a short search (1 initialization point, 1 iteration).
rng = np.random.RandomState(123)
BayesianOpt = BayesianOptimization(bounds, rng = rng, optAlg = "RMSprop", batchSize = batchSize, kFold = kFold,
		numEpochs = numEpochs, validationFrequency = 1, patience = patience, visualize = True, test = True)
#BayesianOpt.initialize(testRecoverDict)
BayesianOpt.explore(lowerAndUpperBounds)
BayesianOpt.minimize(numInitPoints = 1, numIter = 1)

print "Test was successful"