Code Example #1
def test_logistic_derivative(transformer_factory):
    # bprop is computed on the output of the logistic function
    inputs = np.array([0, 1, -2], dtype=float).reshape((3, 1))
    f = 1.0 / (1.0 + np.exp(-inputs))
    outputs = f * (1.0 - f)
    compare_tensors(Logistic(),
                    inputs, outputs, deriv=True, tol=1e-7)
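
The analytical derivative used above, f * (1 - f), can be sanity-checked independently of the test harness with a plain NumPy finite-difference comparison. The sketch below is a standalone illustration: it uses neither compare_tensors nor the ngraph Logistic op, and the step size and tolerance are assumptions.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

x = np.array([0.0, 1.0, -2.0]).reshape((3, 1))
f = sigmoid(x)
analytic = f * (1.0 - f)                        # closed-form derivative

eps = 1e-6                                      # assumed step size
numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)

assert np.allclose(analytic, numeric, atol=1e-7)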
Code Example #2
File: test_activations.py  Project: rsumner31/ngraph
class LogisticPair(ActivationPair):
    neon_activation = Logistic()
    tolerance = 1e-7

    def reference_value(self, x):
        return 1.0 / (1.0 + np.exp(-x))

    def reference_derivative(self, x):
        f = self.reference_value(x)
        return f * (1.0 - f)
Code Example #3
File: test_scope_optimizer.py  Project: ami-GS/ngraph
def make_network(scope1=None, scope2=None):
    # two-layer network; each layer gets its own variable scope
    # (axes, N, nout1, Wlin1, Wbias1, Wlin2, Wbias2 are defined in the enclosing test)
    x = ng.placeholder(axes)  # inputs
    t = ng.placeholder(ng.make_axes([ng.make_axis(length=1),
                                     N]))  # targets
    with Layer.variable_scope(scope1):
        layer1 = Affine(ConstantInit(val=Wlin1),
                        nout=nout1,
                        bias_init=ConstantInit(val=Wbias1),
                        activation=Rectlin(),
                        batch_norm=False)
    with Layer.variable_scope(scope2):
        layer2 = Affine(ConstantInit(val=Wlin2),
                        nout=1,
                        bias_init=ConstantInit(val=Wbias2),
                        activation=Logistic(),
                        batch_norm=False)
    seq = Sequential([layer1, layer2])
    p_t = seq(x)
    t_cast = ng.cast_axes(t, p_t.axes)  # TODO: how can this be avoided?
    loss = ng.cross_entropy_binary(p_t, t_cast)
    return seq, x, t, loss
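
To make explicit what the graph built by make_network computes, here is a rough NumPy-only sketch of the same forward pass (affine + Rectlin, affine + Logistic, binary cross entropy). All shapes and weight values are placeholders invented for the illustration; this is not the ngraph API.

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((4, 8))               # (batch, features), placeholder shape
t = rng.integers(0, 2, size=(4, 1)).astype(float)
W1, b1 = rng.standard_normal((8, 5)), np.zeros(5)
W2, b2 = rng.standard_normal((5, 1)), np.zeros(1)

h = np.maximum(0.0, x @ W1 + b1)              # layer 1: affine + Rectlin (ReLU)
p = 1.0 / (1.0 + np.exp(-(h @ W2 + b2)))      # layer 2: affine + Logistic
loss = -np.mean(t * np.log(p) + (1.0 - t) * np.log(1.0 - p))  # cross_entropy_binary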
Code Example #4
def test_logistic(transformer_factory):
    inputs = np.array([0, 1, -2]).reshape((3, 1))
    # inputs is already shaped (3, 1), so outputs needs no extra reshape
    outputs = 1.0 / (1.0 + np.exp(-inputs))
    compare_tensors(Logistic(), inputs, outputs, tol=1e-7)
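
For reference, the expected outputs in this last test can be computed directly; a quick NumPy check (nothing beyond the snippet itself is assumed):

import numpy as np

x = np.array([0, 1, -2]).reshape((3, 1))
print(1.0 / (1.0 + np.exp(-x)))
# approximately [[0.5], [0.7310586], [0.1192029]]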