Example #1
# Imports assumed from the source project (pysts / Keras 0.x Graph API);
# s0pad and s1pad are module-level padding lengths whose exact value is assumed.
from __future__ import division

from keras.models import Graph
from keras.layers.core import Activation, Dense, Dropout
from keras.regularizers import l2

import pysts.kerasts.blocks as B

s0pad = s1pad = 60  # assumed sentence padding length

def prep_model(glove, vocab, dropout=1/2, dropout_w=0, dropout_in=4/5, l2reg=1e-4,
               cnnact='tanh', cnninit='glorot_uniform', cdim={1: 1, 2: 1/2, 3: 1/2, 4: 1/2, 5: 1/2},
               project=True, pdim=2.5,
               ptscorer=B.mlp_ptscorer, mlpsum='sum', Ddim=1,
               oact='sigmoid'):
    model = Graph()
    # Shared word-embedding layer for both sentences; N is the embedding width
    N = B.embedding(model, glove, vocab, s0pad, s1pad, dropout, dropout_w)

    if dropout_in is None:
        dropout_in = dropout

    # Convolutional sentence summarization (one conv block per filter width in cdim)
    Nc = B.cnnsum_input(model, N, s0pad, dropout=dropout_in, l2reg=l2reg,
                        cnninit=cnninit, cnnact=cnnact, cdim=cdim)

    # Projection
    if project:
        model.add_shared_node(name='proj', inputs=['e0s_', 'e1s_'], outputs=['e0p', 'e1p'],
                              layer=Dense(input_dim=Nc, output_dim=int(N*pdim), W_regularizer=l2(l2reg)))
        # This dropout is controversial; it might be harmful to apply,
        # or at least isn't a clear win.
        # model.add_shared_node(name='projdrop', inputs=['e0p', 'e1p'], outputs=['e0p_', 'e1p_'],
        #                       layer=Dropout(dropout_in, input_shape=(N,)))
        # final_outputs = ['e0p_', 'e1p_']
        final_outputs = ['e0p', 'e1p']
    else:
        final_outputs = ['e0s_', 'e1s_']

    # Measurement
    kwargs = dict()
    if ptscorer == B.mlp_ptscorer:
        kwargs['sum_mode'] = mlpsum
    model.add_node(name='scoreS', input=ptscorer(model, final_outputs, Ddim, N, l2reg, **kwargs),
                   layer=Activation(oact))
    model.add_output(name='score', input='scoreS')
    return model
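For context, here is a minimal sketch of how a model returned by this prep_model might be compiled and trained with the Keras 0.x Graph API. The gr dictionary (mapping the graph's input and output names to arrays) and the training hyperparameters are assumptions, not part of the example above.

model = prep_model(glove, vocab)
# 'score' is the single Graph output added above; binary cross-entropy
# matches the sigmoid default of oact.
model.compile(loss={'score': 'binary_crossentropy'}, optimizer='adam')
model.fit(gr, batch_size=160, nb_epoch=16)  # gr: dict of input/output arrays (hypothetical)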
Example #2
def prep_model(model, N, s0pad, s1pad, c):
    # Imports as in Example #1; hyperparameters come from the config dict c.
    # Unlike Example #3 below, this variant also passes the siamese flag,
    # which presumably ties the convolution weights across the two sentences.
    Nc = B.cnnsum_input(model, N, s0pad, siamese=c['cnnsiamese'],
                        dropout=c['dropout'], l2reg=c['l2reg'],
                        cnninit=c['cnninit'], cnnact=c['cnnact'], cdim=c['cdim'])

    # Projection
    if c['project']:
        model.add_shared_node(name='proj', inputs=['e0s_', 'e1s_'], outputs=['e0p', 'e1p'],
                              layer=Dense(input_dim=Nc, output_dim=int(N*c['pdim']),
                                          W_regularizer=l2(c['l2reg']), activation=c['pact']))
        # This dropout is controversial; it might be harmful to apply,
        # or at least isn't a clear win.
        # model.add_shared_node(name='projdrop', inputs=['e0p', 'e1p'], outputs=['e0p_', 'e1p_'],
        #                       layer=Dropout(c['dropout'], input_shape=(N,)))
        # return ('e0p_', 'e1p_')
        return ('e0p', 'e1p')
    else:
        return ('e0s_', 'e1s_')
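Unlike Example #1, this prep_model only builds the shared sentence pipeline and returns the names of the two final Graph nodes. A hypothetical caller sketch, mirroring the measurement tail of Example #1 and assuming the scoring keys (ptscorer, mlpsum, Ddim, oact) are also present in c:

final_outputs = prep_model(model, N, s0pad, s1pad, c)
kwargs = {'sum_mode': c['mlpsum']} if c['ptscorer'] == B.mlp_ptscorer else {}
model.add_node(name='scoreS',
               input=c['ptscorer'](model, final_outputs, c['Ddim'], N, c['l2reg'], **kwargs),
               layer=Activation(c['oact']))
model.add_output(name='score', input='scoreS')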
Example #3
def prep_model(model, N, s0pad, s1pad, c):
    # Like Example #2, but without the siamese flag; imports as in Example #1.
    Nc = B.cnnsum_input(model, N, s0pad,
                        dropout=c['dropout'], l2reg=c['l2reg'],
                        cnninit=c['cnninit'], cnnact=c['cnnact'], cdim=c['cdim'])

    # Projection
    if c['project']:
        model.add_shared_node(name='proj', inputs=['e0s_', 'e1s_'], outputs=['e0p', 'e1p'],
                              layer=Dense(input_dim=Nc, output_dim=int(N*c['pdim']),
                                          W_regularizer=l2(c['l2reg']), activation=c['pact']))
        # This dropout is controversial; it might be harmful to apply,
        # or at least isn't a clear win.
        # model.add_shared_node(name='projdrop', inputs=['e0p', 'e1p'], outputs=['e0p_', 'e1p_'],
        #                       layer=Dropout(c['dropout'], input_shape=(N,)))
        # return ('e0p_', 'e1p_')
        return ('e0p', 'e1p')
    else:
        return ('e0s_', 'e1s_')
Example #4
def prep_model(model, N, s0pad, s1pad, c):
    # Imports as in Example #1. First run a (possibly bidirectional) RNN over
    # the embedded sequences, returning full sequences rather than one vector...
    B.rnn_input(model, N, s0pad, return_sequences=True,
                dropout=c['dropout'], dropoutfix_inp=c['dropoutfix_inp'], dropoutfix_rec=c['dropoutfix_rec'],
                sdim=c['sdim'],
                rnnbidi=c['rnnbidi'], rnn=c['rnn'], rnnact=c['rnnact'], rnninit=c['rnninit'],
                rnnbidi_mode=c['rnnbidi_mode'], rnnlevels=c['rnnlevels'])

    # ...then summarize the RNN outputs with the CNN block; its output nodes
    # are prefixed 'cnn' (cnne0s_, cnne1s_) to avoid name clashes.
    Nc = B.cnnsum_input(model, N, s0pad, inputs=['e0s_', 'e1s_'], pfx='cnn',
                        dropout=c['dropout'], l2reg=c['l2reg'],
                        cnninit=c['cnninit'], cnnact=c['cnnact'], cdim=c['cdim'])

    # Projection
    if c['project']:
        model.add_shared_node(name='proj', inputs=['cnne0s_', 'cnne1s_'], outputs=['e0p', 'e1p'],
                              layer=Dense(input_dim=Nc, output_dim=int(N*c['pdim']),
                                          W_regularizer=l2(c['l2reg']), activation=c['pact']))
        # This dropout is controversial; it might be harmful to apply,
        # or at least isn't a clear win.
        # model.add_shared_node(name='projdrop', inputs=['e0p', 'e1p'], outputs=['e0p_', 'e1p_'],
        #                       layer=Dropout(c['dropout'], input_shape=(N,)))
        # return ('e0p_', 'e1p_')
        return ('e0p', 'e1p')
    else:
        return ('cnne0s_', 'cnne1s_')
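All the config-driven variants above read their hyperparameters from a plain dict c. Below is a hypothetical minimal configuration; the CNN, projection, and scoring values echo the keyword defaults of Example #1, while the RNN-related keys used by Example #4 are pure assumptions.

from keras.layers.recurrent import GRU  # assumed choice of RNN class

c = {
    # CNN summarization (mirrors Example #1's defaults)
    'dropout': 1/2, 'l2reg': 1e-4,
    'cnnact': 'tanh', 'cnninit': 'glorot_uniform', 'cnnsiamese': True,
    'cdim': {1: 1, 2: 1/2, 3: 1/2, 4: 1/2, 5: 1/2},
    # projection layer
    'project': True, 'pdim': 2.5, 'pact': 'tanh',
    # scoring head (see the caller sketch after Example #2)
    'ptscorer': B.mlp_ptscorer, 'mlpsum': 'sum', 'Ddim': 1, 'oact': 'sigmoid',
    # RNN front-end for Example #4 (all values assumed)
    'sdim': 2, 'rnn': GRU, 'rnnact': 'tanh', 'rnninit': 'glorot_uniform',
    'rnnbidi': True, 'rnnbidi_mode': 'sum', 'rnnlevels': 1,
    'dropoutfix_inp': 0, 'dropoutfix_rec': 0,
}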