Code Example #1
def test_leaky_rectlin_derivative_negatives(transformer_factory):
    """
    the leaky rectlin derivative for negative values is the slope, not 0
    """
    slope = 0.2
    inputs = np.array([[-1, -3], [-2, -4]], dtype=np.float32)
    outputs = np.array([[0, 0], [0, 0]]) + slope
    compare_tensors(Rectlin(slope=slope), inputs, outputs, deriv=True, tol=1e-7)
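These snippets use np, Rectlin, and a compare_tensors helper without showing their imports. A plausible preamble, assuming the module layout suggested by the project names shown in the later examples (the exact import paths are a guess, not confirmed by the source; transformer_factory is presumably a pytest fixture supplied by the test suite's conftest):

import numpy as np
from ngraph.frontends.neon import Rectlin

# compare_tensors is presumably a helper defined in the same test module:
# it evaluates the activation (or its derivative, when deriv=True) on the
# given inputs and asserts the result matches outputs within tol.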
Code Example #2
File: test_activations.py  Project: rsumner31/ngraph
class RectlinPair(ActivationPair):
    neon_activation = Rectlin()

    def reference_value(self, x):
        return np.maximum(x, 0)

    def reference_derivative(self, x):
        return np.greater(x, 0).astype(np.float32)
Code Example #3
File: test_activations.py  Project: rsumner31/ngraph
class LeakyRectlinPair(ActivationPair):
    slope = 0.2
    neon_activation = Rectlin(slope=0.2)

    def reference_value(self, x):
        return np.maximum(x, 0) + np.minimum(x, 0) * self.slope

    def reference_derivative(self, x):
        return np.greater(x, 0) + np.less(x, 0) * self.slope
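For quick checks outside the test harness, the same reference math can be written as standalone NumPy functions (a sketch mirroring the Pair classes above; plain Rectlin is the slope=0.0 case):

import numpy as np

def leaky_rectlin(x, slope=0.0):
    # max(x, 0) plus the scaled negative part; slope=0.0 gives plain ReLU
    return np.maximum(x, 0) + slope * np.minimum(x, 0)

def leaky_rectlin_deriv(x, slope=0.0):
    # 1 where x > 0, slope where x < 0, and 0 at x == 0
    return np.greater(x, 0).astype(np.float32) + slope * np.less(x, 0)

For example, leaky_rectlin(np.array([[4, 0], [-2, 9]]), slope=0.2) yields [[4, 0], [-0.4, 9]], matching the expectation in test_leaky_rectlin_mixed below.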
Code Example #4
File: test_scope_optimizer.py  Project: ami-GS/ngraph
def make_network(scope1=None, scope2=None):
    # 2-layer network; each layer gets its own variable scope.
    # axes, N, Wlin1, Wbias1, nout1, Wlin2, and Wbias2 are presumably
    # module-level fixtures defined elsewhere in the test file.
    x = ng.placeholder(axes)  # inputs
    t = ng.placeholder(ng.make_axes([ng.make_axis(length=1),
                                     N]))  # targets
    with Layer.variable_scope(scope1):
        layer1 = Affine(ConstantInit(val=Wlin1),
                        nout=nout1,
                        bias_init=ConstantInit(val=Wbias1),
                        activation=Rectlin(),
                        batch_norm=False)
    with Layer.variable_scope(scope2):
        layer2 = Affine(ConstantInit(val=Wlin2),
                        nout=1,
                        bias_init=ConstantInit(val=Wbias2),
                        activation=Logistic(),
                        batch_norm=False)
    seq = Sequential([layer1, layer2])
    p_t = seq(x)
    t_cast = ng.cast_axes(t, p_t.axes)  # TODO: how can this be avoided?
    loss = ng.cross_entropy_binary(p_t, t_cast)
    return seq, x, t, loss
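This factory is presumably driven by the scope-optimizer tests; a hypothetical call that places each layer in its own named scope (the scope names here are illustrative, not from the source) might look like:

# Build the network with per-layer scopes so an optimizer can later be
# restricted to the variables of a single scope.
seq, x, t, loss = make_network(scope1='scope1', scope2='scope2')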
Code Example #5
def test_rectlin_derivative_mixed(transformer_factory):
    inputs = np.array([[4, 0], [-2, 9]])
    outputs = np.array([[1, 0], [0, 1]])
    compare_tensors(Rectlin(), inputs, outputs, deriv=True)
Code Example #6
def test_rectlin_derivative_negatives(transformer_factory):
    inputs = np.array([[-1, -3], [-2, -4]])
    outputs = np.array([[0, 0], [0, 0]])
    compare_tensors(Rectlin(), inputs, outputs, deriv=True)
Code Example #7
def test_rectlin_derivative_positives(transformer_factory):
    inputs = np.array([1, 3, 2]).reshape((3, 1))
    outputs = np.array([1, 1, 1]).reshape((3, 1))
    compare_tensors(Rectlin(), inputs, outputs, deriv=True)
Code Example #8
def test_rectlin_mixed(transformer_factory):
    inputs = np.array([[4, 0], [-2, 9]])
    outputs = np.array([[4, 0], [0, 9]])
    compare_tensors(Rectlin(), inputs, outputs)
Code Example #9
def test_leaky_rectlin_derivative_mixed(transformer_factory):
    slope = 0.2
    inputs = np.array([[4, 0], [-2, 9]], dtype=np.float32)
    outputs = np.array([[1, 0], [slope, 1]])
    compare_tensors(Rectlin(slope=slope), inputs, outputs, deriv=True, tol=1e-7)
Code Example #10
def test_leaky_rectlin_mixed(transformer_factory):
    slope = 0.2
    inputs = np.array([[4, 0], [-2, 9]])
    outputs = np.array([[4, 0], [-2 * slope, 9]])
    compare_tensors(Rectlin(slope=slope), inputs, outputs, tol=1e-7)
Code Example #11
def test_leaky_rectlin_negatives(transformer_factory):
    slope = 0.2
    inputs = np.array([[-1, -3], [-2, -4]])
    outputs = inputs * slope
    compare_tensors(Rectlin(slope=slope), inputs, outputs, tol=1e-7)
Code Example #12
def test_leaky_rectlin_positives(transformer_factory):
    slope = 0.2
    inputs = np.array([1, 3, 2]).reshape((3, 1))
    outputs = np.array([1, 3, 2]).reshape((3, 1))
    compare_tensors(Rectlin(slope=slope), inputs, outputs)
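As a sanity check, the leaky expectations in the tests above can be reproduced with plain NumPy, using the same formulas as the reference classes in Examples #2 and #3:

import numpy as np

slope = 0.2
x = np.array([[4, 0], [-2, 9]], dtype=np.float32)

# Forward pass: [[4, 0], [-0.4, 9]], as expected by test_leaky_rectlin_mixed
print(np.maximum(x, 0) + slope * np.minimum(x, 0))

# Derivative: [[1, 0], [0.2, 1]], as expected by
# test_leaky_rectlin_derivative_mixed (note the derivative is 0 at x == 0)
print(np.greater(x, 0) + slope * np.less(x, 0))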