Example no. 1
    def __init__(self, params):
        self.e_layer = WordEmbeddingLayer(embeddings=params.embeddings)
        self.c_layers = []

        for i in range(params.conv_layer_n):
            self.c_layers.append(
                ConvFoldingPoolLayer(params.ks[i],
                                     params.fold[i],
                                     W=params.W[i],
                                     b=params.b[i]))

        self.l_layer = LogisticRegression(params.logreg_W, params.logreg_b)
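The constructor above only reads a fixed set of attributes from `params`. A minimal sketch of a stand-in container with those attributes (the attribute names come from the code above; every value and shape is made up purely for illustration):

from types import SimpleNamespace
import numpy as np

# Hypothetical stand-in for `params`; shapes are invented.
params = SimpleNamespace(
    embeddings=np.random.rand(1000, 48),         # vocabulary size x embedding dimension
    conv_layer_n=2,                              # number of ConvFoldingPoolLayer blocks
    ks=[20, 14],                                 # per-layer pooling parameter ks[i]
    fold=[0, 1],                                 # per-layer folding flag fold[i]
    W=[np.random.rand(6, 1, 1, 10),              # per-layer convolution weights W[i]
       np.random.rand(14, 6, 1, 7)],
    b=[np.random.rand(6), np.random.rand(14)],   # per-layer biases b[i]
    logreg_W=np.random.rand(480, 5),             # weights for the final LogisticRegression
    logreg_b=np.random.rand(5),
)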
Example no. 2
    @classmethod
    def load_from_theano_model(cls, model, word2id):
        return RNTN(embedding=model.embedding.get_value(),
                    rntn_layer=RNTNLayer(model.rntn_layer.V.get_value(),
                                         model.rntn_layer.W.get_value()),
                    logreg_layer=LogisticRegression(
                        model.logreg_layer.W.get_value(),
                        model.logreg_layer.b.get_value()),
                    word2id=word2id)
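The conversion above relies on `get_value()`, which returns the underlying NumPy array of a Theano shared variable. A minimal sketch of that behaviour (the shared variable here is made up):

import numpy as np
import theano

W_shared = theano.shared(np.zeros((3, 3)), name="W")   # hypothetical shared variable
W_np = W_shared.get_value()                            # plain numpy.ndarray (a copy by default)
assert isinstance(W_np, np.ndarray)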
Example no. 3
class DCNN(object):
    def __init__(self, params):
        self.e_layer = WordEmbeddingLayer(embeddings = params.embeddings)
        self.c_layers = []
        
        for i in range(params.conv_layer_n):
            self.c_layers.append(ConvFoldingPoolLayer(params.ks[i],
                                                      params.fold[i],
                                                      W = params.W[i],
                                                      b = params.b[i])
            )

        self.l_layer = LogisticRegression(
            params.logreg_W,
            params.logreg_b
        )

    def _p_y_given_x(self, x):
        output = self.e_layer.output(x)
        
        for l in self.c_layers:
            output = l.output(output)

        assert output.ndim == 4
        output = output.reshape(
            (output.shape[0], 
             np.prod(output.shape[1:]))
        )
        return self.l_layer._p_y_given_x(output)

    def predict(self, x):
        return np.argmax(self._p_y_given_x(x), axis = 1)
 
    # The following functions are for testing purposes only.
    def _nnl(self, x, y):
        p_y_given_x = self._p_y_given_x(x)
        return np.mean(
            -np.log(p_y_given_x[np.arange(y.shape[0]), y])
        )

    def _errors(self, x, y):
        assert y.dtype == np.int32, "%r != %r" %(y.dtype, np.int32)
        pred_y = self.predict(x)
        return np.sum(pred_y != y) / float(pred_y.shape[0])


    def _c_layer_output(self, x):
        output = self.e_layer.output(x)
        
        for l in self.c_layers:
            output = l.output(output)

        return output
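The reshape inside `_p_y_given_x` flattens the 4-dimensional convolution/pooling output into one feature row per input before handing it to the logistic-regression layer. A small NumPy sketch of that step, with made-up shapes:

import numpy as np

conv_out = np.random.rand(3, 6, 4, 5)   # hypothetical (batch, feature maps, rows, cols) output
flat = conv_out.reshape((conv_out.shape[0], np.prod(conv_out.shape[1:])))
assert flat.shape == (3, 6 * 4 * 5)     # one flattened feature vector per input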
Example no. 4
class DCNN(object):
    def __init__(self, params):
        self.e_layer = WordEmbeddingLayer(embeddings = params.embeddings)
        self.c_layers = []
        
        for i in range(params.conv_layer_n):
            self.c_layers.append(ConvFoldingPoolLayer(params.ks[i],
                                                      params.fold[i],
                                                      W = params.W[i],
                                                      b = params.b[i])
            )

        self.l_layer = LogisticRegression(
            params.logreg_W,
            params.logreg_b
        )

    def _p_y_given_x(self, x):
        output = self.e_layer.output(x)
        
        for l in self.c_layers:
            output = l.output(output)

        assert output.ndim == 4
        output = output.reshape(
            (output.shape[0], 
             np.prod(output.shape[1:]))
        )
        return self.l_layer._p_y_given_x(output)

    def predict(self, x):
        return np.argmax(self._p_y_given_x(x), axis = 1)
 
    # The following functions are for testing purposes only.
    def _nnl(self, x, y):
        p_y_given_x = self._p_y_given_x(x)
        return np.mean(
            -np.log(p_y_given_x[np.arange(y.shape[0]), y])
        )

    def _errors(self, x, y):
        assert y.dtype == np.int32, "%r != %r" %(y.dtype, np.int32)
        pred_y = self.predict(x)
        return np.sum(pred_y != y) / float(pred_y.shape[0])


    def _c_layer_output(self, x):
        output = self.e_layer.output(x)
        
        for l in self.c_layers:
            output = l.output(output)

        return output
Example no. 5
    def __init__(self, params):
        self.e_layer = WordEmbeddingLayer(embeddings = params.embeddings)
        self.c_layers = []
        
        for i in range(params.conv_layer_n):
            self.c_layers.append(ConvFoldingPoolLayer(params.ks[i],
                                                      params.fold[i],
                                                      W = params.W[i],
                                                      b = params.b[i])
            )

        self.l_layer = LogisticRegression(
            params.logreg_W,
            params.logreg_b
        )
Example no. 6
import numpy as np
import theano
import theano.tensor

# Note: the plain-NumPy LogisticRegression constructed below is assumed to be
# imported from the project's NumPy implementation; that import is not shown
# in the original snippet.
from logreg import LogisticRegression as TheanoLogisticRegression

from test_util import assert_matrix_eq

#########################
# NUMPY PART
#########################
# 5 labels and 10 inputs

W = np.random.rand(10, 5)
b = np.random.rand(5)

x = np.random.rand(3, 10)
y = np.asarray(np.random.randint(5, size=3), dtype=np.int32)

np_l = LogisticRegression(W, b)

#########################
# THEANO PART
#########################

x_symbol = theano.tensor.dmatrix('x')
y_symbol = theano.tensor.ivector('y')

th_l = TheanoLogisticRegression(rng=np.random.RandomState(1234),
                                input=x_symbol,
                                n_in=10,
                                n_out=5,
                                W=theano.shared(value=W, name="W"),
                                b=theano.shared(value=b, name="b"))
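
From here the two implementations can be compared on the same inputs. A minimal sketch, assuming the Theano class exposes a symbolic `p_y_given_x` attribute (as in the standard Theano logistic-regression tutorial); NumPy's own assertion helper is used instead of `assert_matrix_eq`, whose signature is not shown in the snippet:

# Compile the Theano forward pass and compare against the NumPy one.
f = theano.function(inputs=[x_symbol], outputs=th_l.p_y_given_x)  # p_y_given_x is an assumption
np.testing.assert_allclose(np_l._p_y_given_x(x), f(x))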