Example #1
def test_logistic_cputensor():
    """Logistic.apply_function on the CPU backend matches 1 / (1 + e^-x)."""
    backend = CPU(rng_seed=0)
    raw = np.array([0, 1, -2]).reshape((3, 1))
    expected = 1.0 / (1.0 + np.exp(-raw))
    out = backend.zeros((3, 1))
    Logistic().apply_function(backend, CPUTensor(raw), out)
    assert_tensor_near_equal(CPUTensor(expected), out)
Example #2
def test_logistic_derivative_cputensor():
    """Logistic.apply_derivative on the CPU backend matches sig(x) * (1 - sig(x))."""
    backend = CPU(rng_seed=0)
    raw = np.array([0, 1, -2]).reshape((3, 1))
    sig = 1.0 / (1.0 + np.exp(-raw))
    expected = sig * (1.0 - sig)
    out = backend.zeros(raw.shape)
    Logistic().apply_derivative(backend, CPUTensor(raw), out)
    assert_tensor_near_equal(CPUTensor(expected), out)
Example #3
def test_logistic_cc2tensor():
    """Logistic.apply_function on the cc2 GPU backend matches the numpy sigmoid."""
    # import locally so the test collects even when the GPU backend is absent
    from neon.backends.cc2 import GPU, GPUTensor
    raw = np.array([0, 1, -2]).reshape((3, 1))
    expected = 1.0 / (1.0 + np.exp(-raw))
    backend = GPU(rng_seed=0)
    out = backend.zeros((3, 1))
    Logistic().apply_function(backend, GPUTensor(raw), out)
    assert_tensor_near_equal(GPUTensor(expected), out)
Example #4
    def setup(self):
        """Build a cc2 GPU backend, constant fake inputs, an RBM layer and its cost.

        Exposes ``self.be``, ``self.inputs``, ``self.layer`` and ``self.cost``
        for the tests in this class.
        """
        from neon.backends.cc2 import GPU, GPUTensor

        # TODO: remove randomness from expected target results
        self.be = GPU(rng_seed=0)

        # reusable fake data: all-ones, 2 features x 100 samples
        self.inputs = GPUTensor(np.ones((2, 100)))

        # fake layer configuration
        layer_name = 'testlayer'
        num_nodes = 2
        wt_init = GaussianValGen(backend=self.be, loc=0.0, scale=0.01)
        thislr = {'type': 'gradient_descent',
                  'lr_params': {'learning_rate': 0.01}}

        self.layer = RBMLayer(name=layer_name)
        # fake cost tied to the layer above
        self.cost = SumSquaredDiffs(olayer=self.layer)
        self.layer.initialize({'backend': self.be,
                               'batch_size': 100,
                               'lrule_init': thislr,
                               'nin': 2,
                               'nout': num_nodes,
                               'activation': Logistic(),
                               'weight_init': wt_init})