Code example #1
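# Note: the listing omits the model files' import lines; smt is presumably an
# alias for sloika's module tools (e.g. "import sloika.module_tools as smt").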
def network(klen, sd, nbase=smt.DEFAULT_NBASE, nfeature=1, winlen=11, stride=2, size=[32, 96, 128]):
    """ Create fat Nanonet with GRUs and convolution input layer

    :param klen: Length of kmer
    :param sd: Standard Deviation of initialisation noise
    :param nbase: Number of distinct bases
    :param nfeature: Number of features per time-step
    :param winlen: Length of convolution window over data
    :param stride: Stride over data
    :param size: Sizes of hidden recurrent layers

    :returns: a :class:`layer.Layer`
    """
    _prn = smt.partial(smt.truncated_normal, sd=sd)
    nstate = smt.nstate(klen, nbase=nbase)
    gru_act = smt.tanh
    ff_act = smt.tanh

    inlayer = smt.Convolution(nfeature, size[0], winlen, stride, init=_prn, has_bias=True, fun=ff_act)

    fwd1 = smt.Gru(size[0], size[1], init=_prn, has_bias=True, fun=gru_act)
    bwd1 = smt.Gru(size[0], size[1], init=_prn, has_bias=True, fun=gru_act)
    layer1 = smt.birnn(fwd1, bwd1)

    layer2 = smt.FeedForward(2 * size[1], size[2], has_bias=True, fun=ff_act)

    fwd3 = smt.Gru(size[2], size[1], init=_prn, has_bias=True, fun=gru_act)
    bwd3 = smt.Gru(size[2], size[1], init=_prn, has_bias=True, fun=gru_act)
    layer3 = smt.birnn(fwd3, bwd3)

    layer4 = smt.FeedForward(2 * size[1], size[2], init=_prn, has_bias=True, fun=ff_act)

    outlayer = smt.Softmax(size[2], nstate, init=_prn, has_bias=True)

    return smt.Serial([inlayer, layer1, layer2, layer3, layer4, outlayer])
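A minimal usage sketch (illustrative argument values, not part of the listing): the factory only needs the kmer length and the initialisation noise, with the layer sizes falling back to the defaults in the signature.

net = network(klen=5, sd=0.1)                        # default size=[32, 96, 128]
wide = network(klen=5, sd=0.1, size=[64, 128, 160])  # hypothetical wider variant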
Code example #2
File: tiny_gru.py  Project: adityasarathy/sloika
def network(klen, sd, nbase=smt.DEFAULT_NBASE, nfeature=4, winlen=3, stride=1, size=4):
    """ Create standard Nanonet with GRU units

    :param klen: Length of kmer
    :param sd: Standard Deviation of initialisation noise
    :param nbase: Number of distinct bases
    :param nfeature: Number of features per time-step
    :param winlen: Length of window over data
    :param stride: Stride over data
    :param size: Size of hidden recurrent layers

    :returns: a :class:`layer.Layer`
    """
    assert stride == 1, "Model only supports stride of 1"

    _prn = smt.partial(smt.truncated_normal, sd=sd)
    nstate = smt.nstate(klen, nbase=nbase)
    gru_act = smt.tanh
    ff_act = smt.tanh

    insize = nfeature * winlen

    inlayer = smt.Window(nfeature, winlen)

    fwd1 = smt.Gru(insize, size, init=_prn, has_bias=True, fun=gru_act)
    bwd1 = smt.Gru(insize, size, init=_prn, has_bias=True, fun=gru_act)
    layer1 = smt.birnn(fwd1, bwd1)

    layer2 = smt.FeedForward(2 * size, size, has_bias=True, fun=ff_act)

    outlayer = smt.Softmax(size, nstate, init=_prn, has_bias=True)

    return smt.Serial([inlayer, layer1, layer2, outlayer])
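A usage sketch with illustrative values; note that the assertion above rejects any stride other than 1.

net = network(klen=3, sd=0.1)         # stride defaults to 1
# network(klen=3, sd=0.1, stride=2)   # would raise: "Model only supports stride of 1"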
Code example #3
File: raw_0.98_rgrgr.py  Project: rrwick/sloika
def network(klen, sd, nbase=smt.DEFAULT_NBASE, nfeature=1, winlen=11, stride=5):
    """ Create a network with convolution input layer and five alternating-in-direction GRU layers

    :param klen: Length of kmer
    :param sd: Standard Deviation of initialisation noise
    :param nbase: Number of distinct bases
    :param nfeature: Number of features per time-step
    :param winlen: Length of window over data
    :param stride: Stride over data

    :returns: a :class:`layer.Layer`
    """

    n = 96
    fun = smt.tanh
    init = smt.partial(smt.truncated_normal, sd=sd)

    return smt.Serial([smt.Convolution(nfeature, n, winlen, stride, init=init, has_bias=True, fun=smt.elu),
                       smt.Reverse(smt.Gru(n, n, init=init, has_bias=True, fun=fun)),
                       smt.Gru(n, n, init=init, has_bias=True, fun=fun),
                       smt.Reverse(smt.Gru(n, n, init=init, has_bias=True, fun=fun)),
                       smt.Gru(n, n, init=init, has_bias=True, fun=fun),
                       smt.Reverse(smt.Gru(n, n, init=init, has_bias=True, fun=fun)),
                       smt.Softmax(n, smt.nstate(klen, nbase=nbase), init=init, has_bias=True)])
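Again an illustrative call (hypothetical values): the hidden width (96), activations and initialiser are fixed inside the function, so only the data-facing parameters are exposed.

net = network(klen=5, sd=0.1)                # default 11-sample window, stride 5
fine_net = network(klen=5, sd=0.1, stride=2) # hypothetical finer stride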