def test_gaussian_transfer():
    assert helpers.approx_equal(
        calculate.gaussian(numpy.array([-1.0, 0.0, 0.5, 1.0])),
        [0.367879, 1.0, 0.778801, 0.367879])
    assert helpers.approx_equal(
        calculate.gaussian(numpy.array([-1.0, 0.0, 0.5, 1.0]), variance=0.5),
        [0.135335, 1.0, 0.606531, 0.135335])
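
# Hedged cross-check of the values above against the closed form they imply,
# exp(-x**2 / variance). That formula is an assumption inferred from the
# expected outputs, not taken from calculate.gaussian's documentation.
def test_gaussian_matches_inferred_formula():
    x = numpy.random.uniform(-2.0, 2.0, 10)
    for variance in [0.5, 1.0, 2.0]:
        assert helpers.approx_equal(
            calculate.gaussian(x, variance=variance),
            numpy.exp(-x**2 / variance))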
def test_RBF_activate_high_distance_scale_by_similarity():
    """RBF may return nan if sum of similarities == 0 and it scales by similarity."""
    from learning import SOM

    random.seed(0)
    numpy.random.seed(0)

    clustering_model = SOM(1, 2, neighborhood=0)
    clustering_model.logging = False
    model = rbf.RBF(
        1,
        2,
        1,
        variance=1.0,
        scale_by_similarity=True,
        clustering_model=clustering_model)
    model._pre_train(numpy.array([[0], [1]]), numpy.array([[0], [1]]))
    assert helpers.approx_equal(model._clustering_model.activate([0]), [0, 1])

    assert not numpy.isnan(model.activate(numpy.array([1000.]))).any()
    assert helpers.approx_equal(model._similarity_tensor, [0.5, 0.5])

    assert not numpy.isnan(model.activate(numpy.array([[0.], [1000.]]))).any()
    assert helpers.approx_equal(model._similarity_tensor,
                                [[0.73105858, 0.26894142], [0.5, 0.5]])
def test_softmax_transfer():
    assert list(calculate.softmax(numpy.array([1.0, 1.0]))) == [0.5, 0.5]

    assert helpers.approx_equal(
        calculate.softmax(numpy.array([1.0, 0.0])), [0.7310585, 0.2689414])

    softmax_out = calculate.softmax(numpy.array([1.0, -1.0]))
    assert softmax_out[0] > 0.5 and softmax_out[1] < 0.5
    assert helpers.approx_equal(sum(softmax_out), 1.0)
def test_MLP_activate_matrix():
    model = mlp.MLP((2, 2, 2), transfers=[LinearTransfer(), LinearTransfer()])

    # Set weights for deterministic results
    model._bias_vec = numpy.ones(model._bias_vec.shape)
    model._weight_matrices = [
        numpy.ones(weight_matrix.shape)
        for weight_matrix in model._weight_matrices
    ]

    # Activate
    assert helpers.approx_equal(
        model.activate([[0, 0], [0.5, 0.5]]), [[2, 2], [4, 4]])
    assert helpers.approx_equal(
        model.activate([[1, 0], [0.5, 1]]), [[4, 4], [5, 5]])
    assert helpers.approx_equal(
        model.activate([[1, 1], [0, 0.5]]), [[6, 6], [3, 3]])
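
# Hedged consistency check: activating a matrix of inputs should match
# activating each row individually. Assumes MLP.activate accepts both a
# single input vector and a matrix of row vectors, as the test above implies.
def test_MLP_activate_matrix_matches_vector():
    model = mlp.MLP((2, 2, 2), transfers=[LinearTransfer(), LinearTransfer()])
    model._bias_vec = numpy.ones(model._bias_vec.shape)
    model._weight_matrices = [
        numpy.ones(weight_matrix.shape)
        for weight_matrix in model._weight_matrices
    ]

    inp_matrix = numpy.random.random((3, 2))
    matrix_out = model.activate(inp_matrix)
    for inp_vec, out_vec in zip(inp_matrix, matrix_out):
        assert helpers.approx_equal(model.activate(inp_vec), out_vec)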
def test_SOM_activate_matrix():
    som_ = som.SOM(2, 2)
    som_._weights = numpy.ones(som_._weights.shape)

    assert helpers.approx_equal(
        som_.activate([[1, 1], [0, 1]]), [[0, 0], [1, 1]])
    assert helpers.approx_equal(
        som_.activate([[1, 0], [1, 2]]), [[1, 1], [1, 1]])
    assert helpers.approx_equal(
        som_.activate([[1, 3], [0.2, 1.6]]), [[2, 2], [1, 1]])
    assert helpers.approx_equal(
        som_.activate([[1.8, 1.6], [0, 0]]),
        [[1, 1], [1.4142135623730951, 1.4142135623730951]])
def test_bfgs_eq():
    """Should satisfy certain requirements.

    min_{H_{k+1}} ||H_{k+1} - H_k||,
    subject to H_{k+1} = H_{k+1}^T and H_{k+1} y_k = s_k.
    """
    H_k = numpy.identity(2)
    s_k = numpy.array([-8.0, -8.0]) - numpy.array([10.0, 10.0])
    y_k = numpy.array([20., 20.]) - numpy.array([-16., -16.])

    H_kp1 = optimizer._bfgs_eq(H_k, s_k, y_k)

    # TODO: Check minimize condition (use scipy.minimize with constraints)
    assert helpers.approx_equal(H_kp1.T, H_kp1)
    assert helpers.approx_equal(H_kp1.dot(y_k), s_k)
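
# Hedged cross-check against the textbook BFGS inverse-hessian update
# (Numerical Optimization 2nd ed., eq. 6.17). Assuming _bfgs_eq implements
# this formula, the two should agree; both assertions above (symmetry and
# the secant condition H_{k+1} y_k = s_k) follow from it.
def test_bfgs_eq_matches_textbook_update():
    H_k = numpy.identity(2)
    s_k = numpy.random.random(2)
    y_k = numpy.random.random(2)

    rho = 1.0 / y_k.dot(s_k)
    I = numpy.identity(2)
    expected = (I - rho * numpy.outer(s_k, y_k)).dot(H_k).dot(
        I - rho * numpy.outer(y_k, s_k)) + rho * numpy.outer(s_k, s_k)
    assert helpers.approx_equal(optimizer._bfgs_eq(H_k, s_k, y_k), expected)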
def test_softmax_vector():
    assert list(calculate.softmax(numpy.array([1.0, 1.0]))) == [0.5, 0.5]

    assert helpers.approx_equal(
        calculate.softmax(numpy.array([1.0, 0.0])), [0.7310585, 0.2689414])

    softmax_out = calculate.softmax(numpy.array([1.0, -1.0]))
    assert softmax_out[0] > 0.5 and softmax_out[1] < 0.5
    assert helpers.approx_equal(sum(softmax_out), 1.0)

    shape = random.randint(2, 10)
    softmax_out = calculate.softmax(
        numpy.array(sorted(numpy.random.random(shape))))
    assert sorted(softmax_out) == list(softmax_out)
    assert helpers.approx_equal(sum(softmax_out), 1.0)
def test_softmax_matrix():
    assert helpers.approx_equal(
        calculate.softmax(numpy.array([[1.0, 1.0], [1.0, 0.0]])),
        [[0.5, 0.5], [0.7310585, 0.2689414]])
    assert helpers.approx_equal(
        calculate.softmax(numpy.array([[1.0, 0.0], [0.5, 0.5]])),
        [[0.7310585, 0.2689414], [0.5, 0.5]])

    shape = (random.randint(2, 10), random.randint(2, 10))
    softmax_out = calculate.softmax(
        numpy.sort(numpy.random.random(shape), axis=1))
    assert (numpy.sort(softmax_out, axis=1) == softmax_out).all()
    assert helpers.approx_equal(
        numpy.sum(softmax_out, axis=1), numpy.ones(shape[0]))
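
# Hedged property check: softmax is mathematically invariant to adding a
# constant to every input of a row, softmax(x + c) == softmax(x), which is
# also the standard numerical-stability trick. Assumes calculate.softmax
# operates row-wise on matrices, as the tests above suggest.
def test_softmax_shift_invariance():
    x = numpy.random.random((3, 4))
    assert helpers.approx_equal(
        calculate.softmax(x), calculate.softmax(x + 1.0))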
def test_logit():
    assert calculate.logit(0) == 0.5
    assert helpers.approx_equal(
        calculate.logit(numpy.array([-1.0, 0.0, 0.5, 1.0])),
        [0.26894142, 0.5, 0.6224593, 0.73105857])
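
# Hedged cross-check: the expected values above match the logistic sigmoid
# 1 / (1 + e**-x), which calculate.logit appears to implement (the name
# notwithstanding, these are sigmoid values rather than log-odds). That
# reading is inferred from the expected outputs.
def test_logit_matches_sigmoid_reference():
    x = numpy.random.uniform(-5.0, 5.0, 10)
    assert helpers.approx_equal(
        calculate.logit(x), 1.0 / (1.0 + numpy.exp(-x)))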
def test_mean_list_of_list_of_matrices():
    lol_matrices = [[
        numpy.array([[1, 2], [3, 4]]),
        numpy.array([[-1, -2], [-3, -4]])
    ], [numpy.array([[1, 2], [3, 4]]),
        numpy.array([[1, 2], [3, 4]])]]

    assert helpers.approx_equal(
        mlp._mean_list_of_list_of_matrices(lol_matrices),
        [numpy.array([[1, 2], [3, 4]]),
         numpy.array([[0, 0], [0, 0]])])
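
# Hedged cross-check with a plain numpy reference. The helper appears to
# average element-wise across the outer list, preserving the inner list's
# structure; that reading is inferred from the expected values above rather
# than from documentation.
def test_mean_list_of_list_of_matrices_matches_numpy():
    lol_matrices = [[numpy.random.random((2, 2)) for _ in range(3)]
                    for _ in range(4)]

    means = mlp._mean_list_of_list_of_matrices(lol_matrices)
    for i, mean_matrix in enumerate(means):
        expected = numpy.mean(
            [matrices[i] for matrices in lol_matrices], axis=0)
        assert helpers.approx_equal(mean_matrix, expected)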
def test_SOM_activate_vector():
    som_ = som.SOM(2, 2)
    som_._weights = numpy.ones(som_._weights.shape)

    assert helpers.approx_equal(som_.activate([1, 1]), [0, 0])
    assert helpers.approx_equal(som_.activate([0, 1]), [1, 1])
    assert helpers.approx_equal(som_.activate([1, 0]), [1, 1])
    assert helpers.approx_equal(som_.activate([1, 2]), [1, 1])
    assert helpers.approx_equal(som_.activate([1, 3]), [2, 2])
    assert helpers.approx_equal(som_.activate([0.2, 1.6]), [1, 1])
    assert helpers.approx_equal(som_.activate([1.8, 1.6]), [1, 1])
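
# Hedged cross-check: the expected values above are consistent with
# SOM.activate returning the Euclidean distance from the input to each
# neuron's weight vector. That interpretation is an assumption inferred
# from these tests, not from the SOM documentation.
def test_SOM_activate_matches_euclidean_distance():
    som_ = som.SOM(2, 2)
    som_._weights = numpy.ones(som_._weights.shape)

    inp_vec = numpy.random.random(2)
    expected = numpy.linalg.norm(inp_vec - som_._weights, axis=1)
    assert helpers.approx_equal(som_.activate(inp_vec), expected)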
def test_rbf_obj_and_obj_jac_match():
    """obj and obj_jac functions should return the same obj value."""
    attrs = random.randint(1, 10)
    outs = random.randint(1, 10)

    model = rbf.RBF(attrs, random.randint(1, 10), outs)
    dataset = datasets.get_random_regression(10, attrs, outs)

    # Don't use exactly the same parameters, to ensure obj functions
    # are actually using the given parameters
    parameters = random.uniform(-1.0, 1.0) * model._weight_matrix.ravel()

    assert helpers.approx_equal(
        model._get_obj(parameters, dataset[0], dataset[1]),
        model._get_obj_jac(parameters, dataset[0], dataset[1])[0])
def test_normalize():
    random_matrix = numpy.random.rand(
        random.randint(2, 10), random.randint(1, 10))
    print('Generated Matrix:')
    print(random_matrix)

    normalized_matrix = preprocess.normalize(random_matrix)

    assert random_matrix.shape == normalized_matrix.shape

    # Original matrix should be unchanged
    assert not numpy.array_equal(random_matrix, normalized_matrix)

    # Normalized matrix should have mean of 0 and standard deviation of 1
    # for each dimension
    means = numpy.mean(normalized_matrix, 0)
    for mean in means:
        print(mean)
        assert helpers.approx_equal(mean, 0, tol=1e-10)
    sds = numpy.std(normalized_matrix, 0)
    for sd in sds:
        print(sd)
        assert helpers.approx_equal(sd, 1, tol=1e-10)
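
# Hedged cross-check: the assertions above imply standard z-score
# normalization, (x - mean) / std per column. Whether preprocess.normalize
# uses exactly this formula is an assumption; any affine map to mean 0 and
# sd 1 would also pass the column-wise checks above.
def test_normalize_matches_zscore_reference():
    random_matrix = numpy.random.rand(5, 3)
    expected = ((random_matrix - numpy.mean(random_matrix, 0)) /
                numpy.std(random_matrix, 0))
    assert helpers.approx_equal(preprocess.normalize(random_matrix), expected)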
def _check_obj_and_obj_jac_match(make_model_func, classification=False):
    """obj and obj_jac functions should return the same obj value."""
    attrs = random.randint(1, 10)
    outs = random.randint(1, 10)

    model = make_model_func(attrs, random.randint(1, 10), outs)
    if classification:
        dataset = datasets.get_random_classification(10, attrs, outs)
    else:
        dataset = datasets.get_random_regression(10, attrs, outs)

    # Don't use exactly the same parameters, to ensure obj functions
    # are actually using the given parameters
    parameters = random.uniform(-1.0, 1.0) * mlp._flatten(
        model._weight_matrices)

    assert helpers.approx_equal(
        mlp._mlp_obj(model, dataset[0], dataset[1], parameters),
        mlp._mlp_obj_jac(model, dataset[0], dataset[1], parameters)[0])
def test_LBFGS_approx_equal_BFGS_infinite_num_remembered_iterations():
    # When LBFGS has no limit on remembered iterations, it should
    # approximately equal BFGS, given the initial hessian is the same
    # on all iterations.
    # "During its first m - 1 iterations,
    # Algorithm 7.5 is equivalent to the BFGS algorithm of Chapter 6
    # if the initial matrix H_0 is the same in both methods,
    # and if L-BFGS chooses H_0^k = H_0 at each iteration."
    # ~ Numerical Optimization 2nd ed., pp. 179

    # Rosenbrock function
    f = lambda vec: 100.0 * (vec[1] - vec[0]**2)**2 + (vec[0] - 1.0)**2
    df = lambda vec: numpy.array([
        2.0 * (200.0 * vec[0]**3 - 200.0 * vec[0] * vec[1] + vec[0] - 1.0),
        200.0 * (vec[1] - vec[0]**2)
    ])
    problem = Problem(obj_func=f, jac_func=df)

    # Optimize
    bfgs_vec = numpy.random.random(2)
    lbfgs_vec = numpy.copy(bfgs_vec)

    # Same identity hessian, for both optimizers
    bfgs_optimizer = BFGS(
        step_size_getter=WolfeLineSearch(),
        initial_hessian_func=optimizer.initial_hessian_identity)
    lbfgs_optimizer = LBFGS(
        step_size_getter=WolfeLineSearch(),
        num_remembered_iterations=float('inf'),
        initial_hessian_scalar_func=optimizer.initial_hessian_one_scalar)

    for i in range(10):
        _, bfgs_vec = bfgs_optimizer.next(problem, bfgs_vec)
        _, lbfgs_vec = lbfgs_optimizer.next(problem, lbfgs_vec)

        print(i)
        assert helpers.approx_equal(bfgs_vec, lbfgs_vec)
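
# Hedged sanity check that the hand-coded Rosenbrock gradient used above
# matches a central finite-difference approximation; this keeps the
# BFGS/L-BFGS comparison honest if the analytic derivative is ever edited.
def test_rosenbrock_gradient_matches_finite_difference():
    f = lambda vec: 100.0 * (vec[1] - vec[0]**2)**2 + (vec[0] - 1.0)**2
    df = lambda vec: numpy.array([
        2.0 * (200.0 * vec[0]**3 - 200.0 * vec[0] * vec[1] + vec[0] - 1.0),
        200.0 * (vec[1] - vec[0]**2)
    ])

    vec = numpy.random.random(2)
    eps = 1e-6
    for i in range(2):
        step = numpy.zeros(2)
        step[i] = eps
        numeric = (f(vec + step) - f(vec - step)) / (2.0 * eps)
        assert helpers.approx_equal(df(vec)[i], numeric, tol=1e-4)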
def test_big_relu():
    """Naive relu can overflow with large input values."""
    assert helpers.approx_equal(
        calculate.relu(numpy.array([0., 1000.])), [0.6931471805, 1000])
def test_relu_transfer():
    assert helpers.approx_equal(
        calculate.relu(numpy.array([0, 1])), [0.6931471805, 1.3132616875])
    assert helpers.approx_equal(
        calculate.relu(numpy.array([-1.5, 10])), [0.201413, 10.00004539])
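
# Hedged cross-check: the expected values above match the softplus function
# ln(1 + e**x), a smooth rectifier, which calculate.relu appears to
# implement. numpy.logaddexp(0, x) is a numerically stable reference that
# also avoids the overflow mentioned in test_big_relu.
def test_relu_matches_softplus_reference():
    x = numpy.random.uniform(-5.0, 5.0, 10)
    assert helpers.approx_equal(calculate.relu(x), numpy.logaddexp(0.0, x))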
def test_drelu_simple():
    assert helpers.approx_equal(
        calculate.drelu(numpy.array([0, 1])), [0.5, 0.73105857])
    assert helpers.approx_equal(
        calculate.drelu(numpy.array([-1.5, 10])), [0.182426, 0.9999546])
def test_tanh():
    assert helpers.approx_equal(
        calculate.tanh(numpy.array([-1.0, 0.0, 0.5, 1.0])),
        [-0.761594, 0.0, 0.462117, 0.761594])
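
# Hedged cross-check against numpy's own tanh; the expected values above
# agree with it, so calculate.tanh is presumably the standard hyperbolic
# tangent.
def test_tanh_matches_numpy_reference():
    x = numpy.random.uniform(-5.0, 5.0, 10)
    assert helpers.approx_equal(calculate.tanh(x), numpy.tanh(x))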
def test_big_drelu_simple():
    """Naive drelu can overflow with large input values."""
    assert helpers.approx_equal(
        calculate.drelu(numpy.array([0., 1000.])), [0.5, 1.0])
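
# Hedged cross-check: drelu's expected values above match the logistic
# sigmoid 1 / (1 + e**-x), which is the derivative of the softplus that
# calculate.relu appears to compute. The exact implementation is an
# assumption inferred from these values.
def test_drelu_matches_sigmoid_reference():
    x = numpy.random.uniform(-5.0, 5.0, 10)
    assert helpers.approx_equal(
        calculate.drelu(x), 1.0 / (1.0 + numpy.exp(-x)))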