Example 1
    def get_output_for(self, input, only_at_anchor=False, **kwargs):
        if input.ndim > 2:
            # if the input has more than two dimensions, flatten it into a
            # batch of feature vectors.
            input = input.flatten(2)

        ## calculate attention anchor position based on atw, atb and input x
        at_anchor = nonlinearities.rectify(T.dot(input, self.atw) + self.atb[0])
        at_anchor = T.minimum(at_anchor, 1)
        at_anchor *= self.num_units

        self.at_anchor = at_anchor  # for printing
        # print_op = printing.Print('attention')
        # at_anchor = print_op(at_anchor)
        if only_at_anchor:
            return at_anchor

        ## normal dense layer activation output
        activation = T.dot(input, self.W)

        if self.b is not None:
            activation = activation + self.b.dimshuffle('x', 0)

        out = self.nonlinearity(activation)

        ## multiply the activation by the attention weight
        attention = T.exp(
            self.at_decay * (
                T.arange(0, self.num_units).dimshuffle('x', 0) -
                at_anchor.dimshuffle(0, 'x')
            ) ** 2)

        out *= attention
        return out
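A minimal NumPy sketch of the attention window computed above (the numbers are illustrative assumptions, not taken from the layer): with a negative at_decay, the exponential term forms a Gaussian-like window that peaks at each row's anchor position.

import numpy as np

num_units = 8
at_decay = -0.1                          # assumed negative so weights decay with distance
at_anchor = np.array([[2.0], [5.0]])     # one anchor per batch row, shape (2, 1)
positions = np.arange(num_units)[np.newaxis, :]              # shape (1, num_units)
attention = np.exp(at_decay * (positions - at_anchor) ** 2)  # shape (2, num_units)
print(attention.round(3))                # row 0 peaks at unit 2, row 1 at unit 5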
Example 2
def test_binary_hinge_loss():
    from lasagne.objectives import binary_hinge_loss
    from lasagne.nonlinearities import rectify
    p = theano.tensor.vector('p')
    t = theano.tensor.ivector('t')
    c = binary_hinge_loss(p, t)
    # numeric version
    floatX = theano.config.floatX
    predictions = np.random.rand(10).astype(floatX)
    targets = np.random.randint(0, 2, (10,)).astype("int8")
    hinge = rectify(1 - predictions * (2 * targets - 1))
    # compare
    assert np.allclose(hinge, c.eval({p: predictions, t: targets}))
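The reference computation above encodes the margin form max(0, 1 - p * (2t - 1)). A hedged scalar walk-through with made-up values:

import numpy as np

p = np.array([0.9, 0.2, 0.4])   # predictions
t = np.array([1, 0, 1])         # binary targets
signs = 2 * t - 1               # map {0, 1} -> {-1, +1}
print(np.maximum(0, 1 - p * signs))   # [0.1 1.2 0.6]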
Example 3
def test_multiclass_hinge_loss():
    from lasagne.objectives import multiclass_hinge_loss
    from lasagne.nonlinearities import rectify
    p = theano.tensor.matrix('p')
    t = theano.tensor.ivector('t')
    c = multiclass_hinge_loss(p, t)
    # numeric version
    floatX = theano.config.floatX
    predictions = np.random.rand(10, 20).astype(floatX)
    targets = np.random.randint(0, 20, (10,)).astype("int8")
    one_hot = np.zeros((10, 20))
    one_hot[np.arange(10), targets] = 1
    correct = predictions[one_hot > 0]
    rest = predictions[one_hot < 1].reshape((10, 19))
    rest = np.max(rest, axis=1)
    hinge = rectify(1 + rest - correct)
    # compare
    assert np.allclose(hinge, c.eval({p: predictions, t: targets}))
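A hedged walk-through of the same rule on a single row (illustrative numbers): the loss is max(0, 1 + highest wrong-class score - correct-class score).

import numpy as np

scores = np.array([0.2, 0.7, 0.1])   # class scores for one sample
target = 1                           # correct class index
rest = np.delete(scores, target)     # scores of the wrong classes
print(max(0.0, 1 + rest.max() - scores[target]))   # 1 + 0.2 - 0.7 = 0.5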
Example 4
    def get_leq_constraints(self):
        tcparam = None
        for param in self.get_params(trainable=True):
            if param.name == "tc":
                tcparam = param
                break
        if tcparam is None:
            return []  # apparently no trainable time constant

        #dt = np.tile(self.dt, self.n_hidden)
        coeff = 1.0
        constr = coeff * TT.sum(NL.rectify(self.dt - tcparam))
        #         costheta = tcparam.dot(dt) / (tcparam.norm(2) * np.linalg.norm(dt))
        #         constr = TT.sqrt(2)*self.dt / (costheta - TT.sqrt(1 - costheta**2 + 1e-6)) - tcparam.norm(2)
        # <= constraint, so negate
        #return [(-TT.min(tcparam), -self.dt)]
        #return [(TT.maximum(self.dt - TT.min(tcparam), 0), 0.0)]
        return [(constr.astype(theano.config.floatX), 0.0)]
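A minimal NumPy sketch of what the returned pair expresses, assuming dt is a scalar step size: the rectified sum is zero exactly when every time constant is at least dt, so (constr, 0.0) encodes the elementwise constraint tc >= dt.

import numpy as np

dt = 0.1
tc = np.array([0.05, 0.2, 0.1])          # hypothetical time constants
constr = np.sum(np.maximum(0, dt - tc))  # positive only if some tc < dt
print(constr)                            # 0.05: the first entry violates tc >= dt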
Example 5
    def __call__(self, x):
        if self.leakiness:
            # leaky rectifier: slope `leakiness` below zero, identity above
            import theano.tensor as T
            return T.maximum(self.leakiness * x, x)
        else:
            return rectify(x)
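A NumPy rendering of the leaky branch (the value of leakiness is an assumption): maximum(leakiness * x, x) equals x for positive inputs and leakiness * x for negative ones, as long as 0 <= leakiness < 1.

import numpy as np

x = np.array([-2.0, 3.0])
leakiness = 0.1
print(np.maximum(leakiness * x, x))   # [-0.2  3. ]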
Example 6
    def get_output_for(self, input, *args, **kwargs):
        return nonlinearities.rectify(input)
Example 7
def test_rectify():
    from lasagne.nonlinearities import rectify
    assert [rectify(x) for x in (-1, 0, 1, 2)] == [0, 0, 1, 2]
Example 8
    def relu(self, x):
        return rectify(np.array(x))
Example 9
    def get_output_for(self, input, **kwargs):
        x = input
        p = self.pivot
        a = self.coef
        # piecewise-linear: slope 1 above the pivot, slope `coef` below it
        return nonlinearities.rectify(x - p) - a * nonlinearities.rectify(-(x - p))
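A hedged NumPy rendering of the same piecewise-linear map (the pivot and coefficient are made-up values): slope 1 above the pivot, slope coef below it, with the kink at x = pivot.

import numpy as np

x = np.array([-1.0, 0.5, 2.0])
p, a = 0.5, 0.1                       # hypothetical pivot and coefficient
print(np.maximum(0, x - p) - a * np.maximum(0, -(x - p)))   # [-0.15  0.    1.5 ]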