Code example #1
from collections import OrderedDict

def init_params(options, preemb=None):
    """
    Initialize all parameters
    """
    params = OrderedDict()

    # Word embedding
    if preemb is None:
        params['Wemb'] = norm_weight(options['n_words'], options['dim_word'])
    else:
        print('using preemb')
        params['Wemb'] = preemb

    # Encoder
    params = get_layer(options['encoder'])[0](options, params, prefix='encoder',
                                              nin=options['dim_word'], dim=options['dim'])

    # Decoder: next sentence
    params = get_layer(options['decoder'])[0](options, params, prefix='decoder_f',
                                              nin=options['dim_word'], dim=options['dim'])
    # Decoder: previous sentence
    params = get_layer(options['decoder'])[0](options, params, prefix='decoder_b',
                                              nin=options['dim_word'], dim=options['dim'])

    # Output layer
    params = get_layer('ff')[0](options, params, prefix='ff_logit', nin=options['dim'], nout=options['n_words'])

    return params
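
This snippet relies on helpers defined elsewhere in the skip-thoughts code (norm_weight, ortho_weight, get_layer, and the name-prefixing helper _p used in the next two examples). Below is a minimal sketch of plausible implementations; the scale default, the SVD-based orthogonal init, and the registry contents are assumptions for illustration, not copied from the project.

import numpy

def ortho_weight(ndim):
    # Assumed: random orthogonal matrix via SVD of a Gaussian sample
    W = numpy.random.randn(ndim, ndim)
    u, s, v = numpy.linalg.svd(W)
    return u.astype('float32')

def norm_weight(nin, nout=None, scale=0.1, ortho=True):
    # Assumed: scaled-Gaussian init, falling back to orthogonal when square
    if nout is None:
        nout = nin
    if nout == nin and ortho:
        W = ortho_weight(nin)
    else:
        W = scale * numpy.random.randn(nin, nout)
    return W.astype('float32')

def _p(pp, name):
    # Join a layer prefix and a parameter name: _p('encoder', 'W') -> 'encoder_W'
    return '%s_%s' % (pp, name)

def get_layer(name):
    # Assumed: a registry mapping a layer name to its (param_init, build) pair,
    # so get_layer(options['encoder'])[0] selects the initializer; the init
    # functions referenced here are defined in the following examples
    layers = {'ff': (param_init_fflayer, None),
              'gru': (param_init_gru, None)}
    return layers[name]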
Code example #2
File: layers.py  Project: felipemoraes/skip-thoughts
import numpy

def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None, ortho=True):
    """
    Affine transformation + point-wise nonlinearity
    """
    if nin is None:
        nin = options['dim_proj']
    if nout is None:
        nout = options['dim_proj']
    params[_p(prefix, 'W')] = norm_weight(nin, nout, ortho=ortho)
    params[_p(prefix, 'b')] = numpy.zeros((nout,)).astype('float32')

    return params
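
A quick usage sketch (the dimensions and the options dict are illustrative; options['dim_proj'] is only consulted when nin or nout is omitted):

from collections import OrderedDict

options = {'dim_proj': 4}
params = param_init_fflayer(options, OrderedDict(), prefix='ff_logit',
                            nin=8, nout=16)
print(params['ff_logit_W'].shape)  # (8, 16)
print(params['ff_logit_b'].shape)  # (16,)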
Code example #3
File: layers.py  Project: felipemoraes/skip-thoughts
import numpy

def param_init_gru(options, params, prefix='gru', nin=None, dim=None):
    """
    Gated Recurrent Unit (GRU)
    """
    if nin is None:
        nin = options['dim_proj']
    if dim is None:
        dim = options['dim_proj']
    # Input-to-hidden weights for the reset and update gates, stacked
    W = numpy.concatenate([norm_weight(nin, dim),
                           norm_weight(nin, dim)], axis=1)
    params[_p(prefix, 'W')] = W
    params[_p(prefix, 'b')] = numpy.zeros((2 * dim,)).astype('float32')
    # Recurrent gate weights, initialized orthogonally
    U = numpy.concatenate([ortho_weight(dim),
                           ortho_weight(dim)], axis=1)
    params[_p(prefix, 'U')] = U

    # Separate weights and bias for the candidate hidden state
    Wx = norm_weight(nin, dim)
    params[_p(prefix, 'Wx')] = Wx
    Ux = ortho_weight(dim)
    params[_p(prefix, 'Ux')] = Ux
    params[_p(prefix, 'bx')] = numpy.zeros((dim,)).astype('float32')

    return params
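
For concreteness, a shape check under illustrative sizes nin=8 and dim=16: W and U stack the reset and update gates along the second axis, while Wx, Ux, and bx parameterize the candidate hidden state.

from collections import OrderedDict

options = {'dim_proj': 4}  # fallback, unused when nin and dim are passed
params = param_init_gru(options, OrderedDict(), prefix='encoder', nin=8, dim=16)
print(params['encoder_W'].shape)   # (8, 32)  gate input weights
print(params['encoder_U'].shape)   # (16, 32) gate recurrent weights
print(params['encoder_Wx'].shape)  # (8, 16)  candidate input weights
print(params['encoder_Ux'].shape)  # (16, 16) candidate recurrent weights
print(params['encoder_bx'].shape)  # (16,)    candidate bias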