Ejemplo n.º 1
0
def test_recursive():
    """Smoke-test nesting a Graph container inside a Sequential model."""
    net = containers.Graph()
    net.add_input(name='input1', input_shape=(32, ))
    net.add_node(Dense(16), name='dense1', input='input1')
    net.add_node(Dense(4), name='dense2', input='input1')
    net.add_node(Dense(4), name='dense3', input='dense1')
    net.add_output(name='output1', inputs=['dense2', 'dense3'],
                   merge_mode='sum')

    model = Sequential()
    for piece in (Dense(32, input_shape=(32, )), net, Dense(4)):
        model.add(piece)

    model.compile('rmsprop', 'mse')
    model.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)

    loss = model.evaluate(X_test_graph, y_test_graph)
    assert loss < 2.5

    # Exercise the remaining public entry points without checking results.
    model.evaluate(X_test_graph, y_test_graph, show_accuracy=True)
    model.predict(X_test_graph)
    model.get_config(verbose=1)
Ejemplo n.º 2
0
    def test_recursive(self):
        """Check that a Graph container can be embedded in a Sequential."""
        print('test layer-like API')

        net = containers.Graph()
        net.add_input(name='input1', ndim=2)
        net.add_node(Dense(32, 16), name='dense1', input='input1')
        net.add_node(Dense(32, 4), name='dense2', input='input1')
        net.add_node(Dense(16, 4), name='dense3', input='dense1')
        net.add_output(name='output1', inputs=['dense2', 'dense3'],
                       merge_mode='sum')

        model = Sequential()
        model.add(Dense(32, 32, name='first_seq_dense'))
        model.add(net)
        model.add(Dense(4, 4, name='last_seq_dense'))

        model.compile('rmsprop', 'mse')
        model.fit(X_train, y_train, batch_size=10, nb_epoch=10)

        loss = model.evaluate(X_test, y_test)
        print(loss)
        assert loss < 2.5

        # Exercise the remaining public entry points without checking results.
        model.evaluate(X_test, y_test, show_accuracy=True)
        model.predict(X_test)
        model.get_config(verbose=1)
Ejemplo n.º 3
0
    def convLayer(self, inp_dim, embedding_dim, nb_filter, filter_length):
        """Build a Graph of parallel Conv1D -> MaxPool -> Flatten branches.

        One branch is created per entry of *filter_length*; with several
        branches, their outputs are merged (Keras default mode) at 'output'.
        """
        graph = containers.Graph()
        graph.add_input(name='input', input_shape=(inp_dim, embedding_dim))

        branch_names = []
        for width in filter_length:
            branch = containers.Sequential([
                Convolution1D(nb_filter=nb_filter,
                              filter_length=width,
                              border_mode='valid',
                              activation='relu',
                              subsample_length=1,
                              input_shape=(inp_dim, embedding_dim)),
                MaxPooling1D(pool_length=2),
                Flatten(),
            ])
            node_name = 'Conv{}'.format(width)
            graph.add_node(branch, name=node_name, input='input')
            branch_names.append(node_name)

        # A single branch feeds the output directly; several get merged.
        if len(branch_names) == 1:
            graph.add_output('output', input=branch_names[0])
        else:
            graph.add_output('output', inputs=branch_names)

        return graph
Ejemplo n.º 4
0
    def __call__(self,
                 vocabulary_size=5000,
                 maxlen=100,
                 embedding_dim=300,
                 nb_filter=300,
                 filter_length=[3],
                 layer=-1,
                 hidden_dim=250,
                 nb_class=2,
                 drop_out_prob=0.5,
                 use_my_embedding=False,
                 embedding_weights=None):
        """Build a multi-filter-width CNN text classifier as a Sequential model.

        The embedded sequence is reshaped to a single-channel 2D "image"
        of shape (1, maxlen, embedding_dim); one Conv2D->MaxPool->Flatten
        branch is added per entry of ``filter_length``, and the branch
        outputs are merged before the fully connected classifier head.
        Returns the assembled model.
        """

        self.log_params(locals())
        model = Sequential()
        # add_embedding may adjust maxlen (e.g. for padding conventions);
        # use its return value from here on.
        maxlen = self.add_embedding(model, vocabulary_size, embedding_dim,
                                    maxlen, use_my_embedding,
                                    embedding_weights)

        # Single channel so Convolution2D can slide windows over time rows.
        model.add(Reshape((1, maxlen, embedding_dim)))

        c = containers.Graph()
        c.add_input(name='input', input_shape=(1, maxlen, embedding_dim))
        inps = []
        for filter_h in filter_length:
            # 'valid' conv leaves (maxlen - filter_h + 1) rows; pool over
            # all of them so each branch yields one vector per filter.
            pool_size = (maxlen - filter_h + 1, 1)
            c.add_node(containers.Sequential([
                Convolution2D(nb_filter=nb_filter,
                              nb_row=filter_h,
                              nb_col=embedding_dim,
                              border_mode='valid',
                              activation='relu',
                              init='uniform',
                              input_shape=(1, maxlen, embedding_dim)),
                MaxPooling2D(pool_size=pool_size),
                Flatten()
            ]),
                       name='Conv{}'.format(filter_h),
                       input='input')
            inps.append('Conv{}'.format(filter_h))

        # One branch feeds the output directly; several get merged.
        if len(inps) == 1:
            c.add_output('output', input=inps[0])
        else:
            c.add_output('output', inputs=inps)

        model.add(c)

        self.add_full(model, hidden_dim, drop_out_prob, nb_class)
        return model
Ejemplo n.º 5
0
    def __call__(self,
                 vocabulary_size=5000,
                 maxlen=100,
                 embedding_dim=300,
                 filter_length=[3],
                 layer=-1,
                 skip=2,
                 hidden_dim=250,
                 nb_class=2,
                 drop_out_prob=0.5,
                 use_my_embedding=False,
                 embedding_weights=None):
        """Build a residual-style stacked-convolution model.

        Each of ``layer`` blocks applies ``skip`` 'same'-padded Conv2D
        layers (with dropout between them), sums the result with an
        identity shortcut, then halves the time dimension via max-pooling.
        With ``layer <= 0`` the maximum number of blocks that fits is used.
        Returns the assembled Sequential model.
        """

        # filter_length is only read (never mutated), so the mutable
        # default is harmless here.
        filter_row = filter_length[0]
        filter_col = 1
        nb_filter = embedding_dim

        self.log_params(locals())
        model = Sequential()
        maxlen = self.add_embedding(model, vocabulary_size, embedding_dim,
                                    maxlen, use_my_embedding,
                                    embedding_weights)

        # Rearrange (maxlen, embedding_dim) into (nb_filter, maxlen,
        # filter_col) so the convolutions slide along the time axis only.
        model.add(Permute((2, 1)))
        model.add(Reshape((nb_filter * maxlen, )))
        model.add(RepeatVector(filter_col))
        model.add(Permute((2, 1)))
        model.add(Reshape((nb_filter, maxlen, filter_col)))

        # Each block halves maxlen, so at most log2(maxlen) - 1 blocks fit.
        max_possible_layer = int(math.log(maxlen, 2)) - 1
        if layer > 0:
            # NOTE(review): assert is stripped under `python -O`; a
            # ValueError would be more robust, but callers may rely on
            # AssertionError, so the exception type is kept.
            assert layer < max_possible_layer
        else:
            layer = max_possible_layer

        for i in range(layer):
            # BUGFIX: use integer (floor) division — under Python 3 the
            # old `maxlen / 2**i` yields a float, which is invalid in a
            # Keras input_shape.
            input_shape = (nb_filter, maxlen // 2**i, filter_col)
            block = containers.Graph()
            input_name = 'block_{}_input'.format(i)
            identity_name = 'block_{}_identity'.format(i)
            block.add_input(input_name, input_shape=input_shape)
            self.graphadd(block, Identity(), input_name, identity_name)

            prev_output = input_name
            for j in range(skip):
                conv = Convolution2D(nb_filter=nb_filter,
                                     nb_row=filter_row,
                                     nb_col=1,
                                     border_mode='same',
                                     activation='relu',
                                     dim_ordering='th',
                                     subsample=(1, 1))
                prev_output = self.graphadd(block, conv, prev_output,
                                            'conv_{}_{}'.format(i, j))
                # BUGFIX: was `j < skip`, which is always true for
                # j in range(skip); the guard clearly intends dropout only
                # BETWEEN convolutions, not after the last one.
                if j < skip - 1:
                    prev_output = self.graphadd(block, Dropout(drop_out_prob),
                                                prev_output,
                                                'dropout_{}_{}'.format(i, j))

            # Residual merge: conv path plus identity shortcut.
            block.add_output('block_{}_output'.format(i),
                             inputs=[prev_output, identity_name],
                             merge_mode='sum')

            model.add(block)
            model.add(MaxPooling2D(pool_size=(2, 1)))

            if i == max_possible_layer - 1:
                # Collapse the final length-2 time axis to 1.
                logging.debug("Last layer added")
                model.add(
                    Convolution2D(nb_filter=nb_filter,
                                  nb_row=2,
                                  nb_col=1,
                                  border_mode='valid',
                                  activation='relu',
                                  dim_ordering='th',
                                  subsample=(1, 1)))

        self.add_full(model, hidden_dim, drop_out_prob, nb_class)
        return model