Example #1
 def test_sse(self):
     ss = SSE()
     assert ss.value == 0.0
     refs = CPUTensor([0.00, 1, 0.7, -0.4])
     preds = CPUTensor([0.2, -1, 0.5, 2.4])
     ss.add(refs, preds)
     assert abs(ss.report() - (0.2**2 + 2**2 + 0.2**2 + 2.8**2)) < 1e-5
Example #2
 def test_neginfnorm(self):
     tsr = self.be.array([[-1, 0], [1, 3]])
     # -> min(abs(tsr), axis=0) -> [1, 0]
     assert_tensor_equal(self.be.norm(tsr, order=float('-inf'), axis=0),
                         CPUTensor([[1, 0]]))
     # -> min(abs(tsr), axis=1) -> [0, 1]
     assert_tensor_equal(self.be.norm(tsr, order=float('-inf'), axis=1),
                         CPUTensor([0, 1]))
Example #3
 def test_0norm(self):
     tsr = self.be.array([[-1, 0], [1, 3]])
     # -> sum(tsr != 0, axis=0) -> [2, 1]
     assert_tensor_equal(self.be.norm(tsr, order=0, axis=0),
                         CPUTensor([[2, 1]]))
     # -> sum(tsr != 0, axis=1) -> [1, 2]
     assert_tensor_equal(self.be.norm(tsr, order=0, axis=1),
                         CPUTensor([1, 2]))
Example #4
 def test_infnorm(self):
     tsr = self.be.array([[-1, 0], [1, 3]])
     # -> max(abs(tsr), axis=0) -> [1, 3]
     assert_tensor_equal(self.be.norm(tsr, order=float('inf'), axis=0),
                         CPUTensor([[1, 3]]))
     # -> max(abs(tsr), axis=1) -> [1, 3]
     assert_tensor_equal(self.be.norm(tsr, order=float('inf'), axis=1),
                         CPUTensor([1, 3]))
Example #5
 def test_sse_mat(self):
     ss = SSE()
     assert ss.value == 0.0
     refs = CPUTensor([[0, 1, 0.7], [0.5, -3, 0]])
     preds = CPUTensor([[0.2, 1, 0.5], [0, -5.5, 0.10]])
     ss.add(refs, preds)
     assert abs(ss.report() -
                (0.2**2 + 0**2 + 0.2**2 + 0.5**2 + 2.5**2 + .10**2)) < 1e-5
Example #6
 def test_1norm(self):
     tsr = self.be.array([[-1, 0], [1, 3]])
     # -> sum([[1, 0], [1, 3]], axis=0)**1 -> [2, 3]
     assert_tensor_equal(self.be.norm(tsr, order=1, axis=0),
                         CPUTensor([[2, 3]]))
     # -> sum([[1, 0], [1, 3]], axis=1)**1 -> [1, 4]
     assert_tensor_equal(self.be.norm(tsr, order=1, axis=1),
                         CPUTensor([1, 4]))
Example #7
 def test_mse(self):
     ms = MSE()
     assert ms.value == 0.0
     refs = CPUTensor([0.00, 1, 0.7, -0.4])
     preds = CPUTensor([0.2, -1, 0.5, 2.4])
     ms.add(refs, preds)
     assert abs(ms.report() -
                ((0.2**2 + 2**2 + 0.2**2 + 2.8**2) / 4.0)) < 1e-5
Example #8
 def test_range_slicing(self):
     tns = CPUTensor([[1, 2], [3, 4]])
     res = tns[0:2, 0]
     expected_shape = (2, )
     while len(expected_shape) < res._min_dims:
         expected_shape += (1, )
     assert res.shape == expected_shape
     assert_tensor_equal(res, CPUTensor([1, 3]))
Example #9
def test_softmax_cputensor():
    sftmx = Softmax()
    inputs = np.array([0, 1, -2]).reshape((3, 1))
    be = CPU(rng_seed=0)
    temp = be.zeros((3, 1))
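    # subtracting the max input (here 1) before exponentiating leaves the
    # softmax unchanged and keeps the reference computation numerically stable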
    outputs = np.exp(inputs - 1) / np.sum(np.exp(inputs - 1))
    sftmx.apply_function(be, CPUTensor(inputs), temp)
    assert_tensor_near_equal(CPUTensor(outputs), temp)
Example #10
def test_logistic_cputensor():
    lgstc = Logistic()
    inputs = np.array([0, 1, -2]).reshape((3, 1))
    be = CPU(rng_seed=0)
    temp = be.zeros((3, 1))
    outputs = 1.0 / (1.0 + np.exp(-inputs))
    lgstc.apply_function(be, CPUTensor(inputs), temp)
    assert_tensor_near_equal(CPUTensor(outputs), temp)
Example #11
 def test_2norm(self):
     tsr = self.be.array([[-1, 0], [1, 3]])
     rpow = 1. / 2
     # -> sum([[1, 0], [1, 9]], axis=0)**.5 -> sqrt([2, 9])
     assert_tensor_equal(self.be.norm(tsr, order=2, axis=0),
                         CPUTensor([[2**rpow, 9**rpow]]))
     # -> sum([[1, 0], [1, 9]], axis=1)**.5 -> sqrt([1, 10])
     assert_tensor_equal(self.be.norm(tsr, order=2, axis=1),
                         CPUTensor([1**rpow, 10**rpow]))
Example #12
def test_tanh_derivative_cputensor():
    tntest = Tanh()
    inputs = np.array([0, 1, -2])
    be = CPU(rng_seed=0)
    outputs = np.array(
        [1 - true_tanh(0)**2, 1 - true_tanh(1)**2, 1 - true_tanh(-2)**2])
    temp = be.zeros(inputs.shape)
    tntest.apply_derivative(be, CPUTensor(inputs), temp)
    assert_tensor_near_equal(CPUTensor(outputs), temp)
Example #13
def test_tanh_cputensor():
    tntest = Tanh()
    be = CPU(rng_seed=0)
    inputs = np.array([0, 1, -2])
    temp = be.zeros(inputs.shape)
    outputs = np.array([true_tanh(0), true_tanh(1), true_tanh(-2)])
    tntest.apply_function(be, CPUTensor(inputs), temp)
    assert_tensor_near_equal(CPUTensor(outputs), temp)
Example #14
 def test_misclass_sum_add_binary(self):
     mcs = MisclassSum()
     assert mcs.rec_count == 0
     assert mcs.misclass_sum == 0
     refs = CPUTensor([[0, 1, 0, 0]])
     preds = CPUTensor([[1, 1, 0, 1]])
     mcs.add(refs, preds)
     assert mcs.rec_count == 4
     assert mcs.misclass_sum == 2
Example #15
def test_logistic_derivative_cputensor():
    lgstc = Logistic()
    inputs = np.array([0, 1, -2]).reshape((3, 1))
    be = CPU(rng_seed=0)
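    # logistic sigmoid and its derivative: sigma'(x) = sigma(x) * (1 - sigma(x))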
    outputs = 1.0 / (1.0 + np.exp(-inputs))
    outputs = outputs * (1.0 - outputs)
    temp = be.zeros(inputs.shape)
    lgstc.apply_derivative(be, CPUTensor(inputs), temp)
    assert_tensor_near_equal(CPUTensor(outputs), temp)
Example #16
 def test_logloss_sum_mixed(self):
     ll = LogLossSum()
     assert ll.logloss == 0.0
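     # refs hold the true class index per record; preds hold per-class
     # probabilities, with one record per column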
     refs = CPUTensor([1, 0, 1]).transpose()
     preds = CPUTensor([[0.00, 1, 0], [0.09, 0.0, 0.75], [0.01, 0, 0.15],
                        [0.90, 0, 0.10]])
     ll.add(refs, preds)
     assert abs(ll.report() + (math.log(.09) + math.log(1.0 - ll.eps) +
                               math.log(0.75))) < 1e-6
Example #17
 def test_mse_mat(self):
     ms = MSE()
     assert ms.value == 0.0
     assert ms.rec_count == 0.0
     refs = CPUTensor([[0, 1, 0.7], [0.5, -3, 0]])
     preds = CPUTensor([[0.2, 1, 0.5], [0, -5.5, 0.10]])
     ms.add(refs, preds)
     assert abs(ms.report() - (
         (0.2**2 + 0**2 + 0.2**2 + 0.5**2 + 2.5**2 + .10**2) / 6.0)) < 1e-6
Example #18
def compare_cpu_tensors(inputs, outputs, deriv=False):
    rlin = RectLeaky()
    be = CPU()
    temp = be.zeros(inputs.shape)
    if deriv is True:
        rlin.apply_derivative(be, CPUTensor(inputs), temp)
    else:
        rlin.apply_function(be, CPUTensor(inputs), temp)
    be.subtract(temp, CPUTensor(outputs), temp)
    assert_tensor_equal(temp, be.zeros(inputs.shape))
Example #19
 def test_misclass_rate_report(self):
     mcr = MisclassRate()
     assert mcr.rec_count == 0
     assert mcr.misclass_sum == 0
     refs = CPUTensor([[0, 1, 0, 0]])
     preds = CPUTensor([[1, 1, 0, 1]])
     mcr.add(refs, preds)
     assert mcr.rec_count == 4
     assert mcr.misclass_sum == 2
     assert mcr.report() == 0.5
Example #20
 def test_misclass_pct_report(self):
     mcp = MisclassPercentage()
     assert mcp.rec_count == 0
     assert mcp.misclass_sum == 0
     refs = CPUTensor([[0, 1, 0, 0]])
     preds = CPUTensor([[1, 1, 0, 1]])
     mcp.add(refs, preds)
     assert mcp.rec_count == 4
     assert mcp.misclass_sum == 2
     assert mcp.report() == 50.0
Example #21
 def test_lrgnorm(self):
     tsr = self.be.array([[-1, 0], [1, 3]])
     rpow = 1. / 5
     # -> sum([[1, 0], [1, 243]], axis=0)**rpow -> rpow([2, 243])
     assert_tensor_equal(self.be.norm(tsr, order=5, axis=0),
                         CPUTensor([[2**rpow, 243**rpow]]))
     # -> sum([[1, 0], [1, 243]], axis=1)**rpow -> rpow([1, 244])
     # 244**.2 == ~3.002465 hence the near_equal test
     assert_tensor_near_equal(self.be.norm(tsr, order=5, axis=1),
                              CPUTensor([1**rpow, 244**rpow]), 1e-6)
Example #22
 def test_misclass_sum_mixed(self):
     mcs = MisclassSum()
     assert mcs.rec_count == 0
     assert mcs.misclass_sum == 0
     refs = CPUTensor([[3, 0, 1]])
     preds = CPUTensor([[0.00, 1, 0], [0.09, 0.0, 0.55], [0.01, 0, 0.75],
                        [0.90, 0, 0.34]])
     mcs.add(refs, preds)
     assert mcs.rec_count == 3
     assert mcs.misclass_sum == 1
Example #23
def test_cross_entropy_derivative_cputensor():
    be = CPU(rng_seed=0)
    outputs = CPUTensor([0.5, 0.9, 0.1, 0.0001])
    targets = CPUTensor([0.5, 0.99, 0.01, 0.2])
    temp = [be.zeros(outputs.shape), be.zeros(outputs.shape)]
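    # derivative of the binary cross entropy -t*log(y) - (1-t)*log(1-y)
    # with respect to y is (y - t) / (y * (1 - y))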
    expected_result = ((outputs.asnumpyarray() - targets.asnumpyarray()) /
                       (outputs.asnumpyarray() * (1 - outputs.asnumpyarray())))
    assert_tensor_near_equal(expected_result,
                             cross_entropy_derivative(be, outputs,
                                                      targets, temp))
Example #24
 def test_auc_add_binary(self):
     auc = AUC()
     assert auc.num_pos == 0
     assert auc.num_neg == 0
     refs = CPUTensor([[0, 1, 0, 0]])
     preds = CPUTensor([[1, 1, 0, 1]])
     auc.add(refs, preds)
     assert auc.num_pos == 1
     assert auc.num_neg == 3
     assert len(auc.probs) == 4
     assert len(auc.labels) == 4
Example #25
 def test_logloss_mean(self):
     ll = LogLossMean()
     assert ll.logloss == 0.0
     assert ll.rec_count == 0.0
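     # refs are one-hot encoded with one record per column; the reported
     # loss is the mean over the 3 records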
     refs = CPUTensor([[0, 1, 0], [1, 0, 1], [0, 0, 0], [0, 0, 0]])
     preds = CPUTensor([[0.00, 1, 0], [0.09, 0.0, 0.75], [0.01, 0, 0.15],
                        [0.90, 0, 0.10]])
     ll.add(refs, preds)
     assert abs(ll.report() +
                (math.log(.09) + math.log(1.0 - ll.eps) + math.log(0.75)) /
                3.0) < 1e-6
Example #26
 def test_negnorm(self):
     tsr = self.be.array([[-1, -2], [1, 3]])
     rpow = -1. / 3
     # -> sum([[1, .125], [1, .037037]], axis=0)**rpow -> rpow([2, .162037])
     assert_tensor_equal(self.be.norm(tsr, order=-3, axis=0),
                         CPUTensor([[2**rpow, .162037037037**rpow]]))
     # -> sum([[1, .125], [1, .037037]], axis=1)**rpow ->
     # rpow([1.125, 1.037037])
     assert_tensor_near_equal(self.be.norm(tsr, order=-3, axis=1),
                              CPUTensor([1.125**rpow, 1.037037**rpow]),
                              1e-6)
Example #27
 def test_misclass_sum_top3probs(self):
     mcs = MisclassSum(error_rank=3)
     assert mcs.rec_count == 0
     assert mcs.misclass_sum == 0
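     # error_rank=3 appears to count a record as correct when its true class
     # (the argmax of its refs column) is among the 3 highest predictions,
     # consistent with misclass_sum staying 0 below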
     refs = CPUTensor([[0.03, 0.80, 0.81], [0.20, 0.02, 0.15],
                       [0.31, 0.08, 0.01], [0.46, 0.10, 0.03]])
     preds = CPUTensor([[0.00, 1, 0.34], [0.09, 0.0, 0.55], [0.01, 0, 0.75],
                        [0.90, 0, 0.00]])
     mcs.add(refs, preds)
     assert mcs.rec_count == 3
     assert mcs.misclass_sum == 0
Example #28
def test_softmax_derivative_cputensor():
    sftmx = Softmax()
    inputs = np.array([0, 1, -2]).reshape((3, 1))
    be = CPU(rng_seed=0)
    outputs = np.exp(inputs) / np.sum(np.exp(inputs))
    errmat = np.ones(inputs.shape)
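    # per-column softmax Jacobian-vector product: out * (err - <err, out>),
    # where the einsum computes the per-column dot product <err, out>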
    a = np.einsum('ij,ji->i', errmat.T, outputs)
    outputs = outputs * (errmat - a[np.newaxis, :])
    temp = be.zeros(inputs.shape)
    sftmx.apply_derivative(be, CPUTensor(inputs), temp)
    assert_tensor_near_equal(CPUTensor(outputs), temp)
Example #29
 def test_auc_add_mixed(self):
     auc = AUC()
     assert auc.num_pos == 0
     assert auc.num_neg == 0
     refs = CPUTensor([[0, 1, 0]])
     preds = CPUTensor([[0.00, 1, 0], [0.09, 0.0, 0.75], [0.01, 0, 0.15],
                        [0.90, 0, 0.10]])
     auc.add(refs, preds)
     assert auc.num_pos == 1
     assert auc.num_neg == 2
     assert len(auc.probs) == 3
     assert len(auc.labels) == 3
Example #30
 def test_auc_add_probs(self):
     auc = AUC()
     assert auc.num_pos == 0
     assert auc.num_neg == 0
     refs = CPUTensor([[0.03, 0.80, 0.01], [0.20, 0.02, 0.80],
                       [0.31, 0.08, 0.01], [0.46, 0.10, 0.03]])
     preds = CPUTensor([[0.00, 1, 0], [0.09, 0.0, 0.75], [0.01, 0, 0.15],
                        [0.90, 0, 0.10]])
     auc.add(refs, preds)
     assert auc.num_pos == 1
     assert auc.num_neg == 2
     assert len(auc.probs) == 3
     assert len(auc.labels) == 3
Example #31
 def test_greater(self):
     left = self.be.array([[-1, 0], [1, 92]])
     right = self.be.ones([2, 2])
     out = self.be.empty([2, 2])
     self.be.greater(left, right, out)
     assert out.shape == (2, 2)
     assert_tensor_equal(out, CPUTensor([[0, 0], [0, 1]]))
Example #32
def test_cross_entropy_cputensor():
    be = CPU(rng_seed=0)
    outputs = CPUTensor([0.5, 0.9, 0.1, 0.0001])
    targets = CPUTensor([0.5, 0.99, 0.01, 0.2])
    temp = [be.zeros(outputs.shape), be.zeros(outputs.shape)]
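    # binary cross entropy summed over all elements:
    # sum(-t*log(y) - (1-t)*log(1-y))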
    expected_result = np.sum((- targets.asnumpyarray()) *
                             np.log(outputs.asnumpyarray()) -
                             (1 - targets.asnumpyarray()) *
                             np.log(1 - outputs.asnumpyarray()), keepdims=True)
    assert_tensor_near_equal(expected_result, cross_entropy(be, outputs,
                                                            targets, temp))
Example #33
 def test_asnumpyarray(self):
     tns = CPUTensor([[1, 2], [3, 4]])
     res = tns.asnumpyarray()
     assert isinstance(res, np.ndarray)
     assert_tensor_equal(res, np.array([[1, 2], [3, 4]]))
Example #34
 def test_transpose(self):
     tns = CPUTensor([[1, 2], [3, 4]])
     res = tns.transpose()
     assert_tensor_equal(res, CPUTensor([[1, 3], [2, 4]]))
Example #35
0
 def test_fill(self):
     tns = CPUTensor([[1, 2], [3, 4]])
     tns.fill(-9.5)
     assert_tensor_equal(tns, CPUTensor([[-9.5, -9.5], [-9.5, -9.5]]))