# Example 1
def oneLayerBidirectionalLstmDecoder(bottom, sequenceLengths, hiddenDim_1,
                                     hiddenDim_target):
    """Decode a latent sequence with a one-layer bidirectional LSTM.

    Runs a single BLSTM layer over ``bottom`` (backward direction fed a
    sequence-reversed copy), concatenates both directions, then applies two
    fully-connected layers to produce a per-timestep reconstruction.

    Args:
        bottom: Input tensor; assumed shape (batch, time, features) — the
            static time dimension must be known (TODO confirm with callers).
        sequenceLengths: Per-example valid lengths used by reverse_sequence
            and the BLSTM.
        hiddenDim_1: Hidden size of the (single) BLSTM layer and of FC_1.
        hiddenDim_target: Feature size of the reconstructed output.

    Returns:
        Tensor of shape (batch, maxSeqLen, hiddenDim_target).
    """
    time_dim = 1
    batch_dim = 0

    # Static max sequence length, needed to reshape back after flattening.
    maxSeqLen = bottom.shape[1].value

    # Reverse so the backward LSTM sees the sequence in its natural order.
    samples_bw = tf.reverse_sequence(bottom, sequenceLengths, time_dim,
                                     batch_dim, 'vae_decoder_reverse_bottom')
    output_fw, output_bw, _, _ = bidirectionalLstmNetwork(
        'vae_decoder_BidirectionalLstm', bottom, samples_bw, 1, [hiddenDim_1],
        sequenceLengths)
    # Undo the reversal so forward/backward outputs are time-aligned.
    output_bw_reversed = tf.reverse_sequence(output_bw, sequenceLengths,
                                             time_dim, batch_dim,
                                             'vae_decoder_reverse_blstm_out')

    # Concatenate both directions along the feature axis.
    biLstmOut1 = tf.concat([output_fw, output_bw_reversed], 2)

    # Flatten (batch, time) so the linear layers act per timestep.
    biLstmOut = tf.reshape(biLstmOut1, [-1, 2 * hiddenDim_1])

    linear_decoder_1 = linearLayer('vae_decoder_FC_1', biLstmOut, hiddenDim_1,
                                   True)

    reconstruction = tf.reshape(linearLayer("vae_decoder_reconstruction",
                                            linear_decoder_1, hiddenDim_target,
                                            False),
                                shape=[-1, maxSeqLen, hiddenDim_target])

    return reconstruction
# Example 2
def myModel2(samples, seqenceLengths, sequenceLength_max, outputNum):
    """One-layer BLSTM (1024 units) followed by FC + dropout + linear output.

    Args:
        samples: Input tensor; assumed shape (batch, time, features) —
            TODO confirm with callers.
        seqenceLengths: Per-example valid lengths for reverse_sequence and
            the BLSTM. (Spelling kept for interface compatibility.)
        sequenceLength_max: Unused here; kept so the signature matches the
            sibling network builders in this file.
        outputNum: Size of the final linear layer's output.

    Returns:
        Flattened per-timestep predictions of shape
        (batch * time, outputNum).
    """
    time_dim = 1
    batch_dim = 0

    # Reverse so the backward LSTM sees the sequence in its natural order.
    samples_bw = tf.reverse_sequence(samples, seqenceLengths, time_dim,
                                     batch_dim, 'reverse_bottom')

    output_fw, output_bw, _, _ = bidirectionalLstmNetwork(
        'BidirectionalLstm', samples, samples_bw, 1, [1024], seqenceLengths)

    # Undo the reversal so forward/backward outputs are time-aligned.
    output_bw_reversed = tf.reverse_sequence(output_bw, seqenceLengths,
                                             time_dim, batch_dim,
                                             'reverse_blstm_out')

    # Concatenate both directions along the feature axis.
    biLstmOut1 = tf.concat([output_fw, output_bw_reversed], 2)

    # Flatten (batch, time) so the linear layers act per timestep.
    biLstmOut = tf.reshape(biLstmOut1, [-1, 2 * 1024])

    fc1 = linearLayer('FC_1', biLstmOut, 1024, True)

    fc1_dropout = tf.nn.dropout(fc1, 0.5)

    prediction = linearLayer('Linear_1', fc1_dropout, outputNum, False)

    return prediction
# Example 3
def blstmFramePredictionNetwork(samples,
                                seqenceLengths,
                                sequenceLength_max,
                                outputNum,
                                layerNum=3,
                                neuronNum=None):
    """Stacked BLSTM producing a prediction for every frame (timestep).

    Args:
        samples: Input tensor; assumed shape (batch, time, features) —
            TODO confirm with callers.
        seqenceLengths: Per-example valid lengths for reverse_sequence and
            the BLSTM. (Spelling kept for interface compatibility.)
        sequenceLength_max: Static max sequence length, used to reshape the
            flattened predictions back to (batch, time, outputNum).
        outputNum: Size of the per-frame output.
        layerNum: Number of BLSTM layers.
        neuronNum: List of hidden sizes per layer; defaults to [512].
            (A None sentinel avoids the shared mutable-default pitfall.)

    Returns:
        Tensor of shape (batch, sequenceLength_max, outputNum).
    """
    if neuronNum is None:
        neuronNum = [512]

    time_dim = 1
    batch_dim = 0

    # Reverse so the backward LSTM sees the sequence in its natural order.
    samples_bw = tf.reverse_sequence(samples, seqenceLengths, time_dim,
                                     batch_dim, 'reverse_bottom')
    output_fw, output_bw, _, _ = bidirectionalLstmNetwork(
        'BidirectionalLstm', samples, samples_bw, layerNum, neuronNum,
        seqenceLengths)
    # Undo the reversal so forward/backward outputs are time-aligned.
    output_bw_reversed = tf.reverse_sequence(output_bw, seqenceLengths,
                                             time_dim, batch_dim,
                                             'reverse_blstm_out')

    # Concatenate both directions, then flatten (batch, time) for the FCs.
    biLstmOut1 = tf.concat([output_fw, output_bw_reversed], 2)
    biLstmOut = tf.reshape(biLstmOut1, [-1, 2 * neuronNum[-1]])

    fc1 = linearLayer('FC_1', biLstmOut, neuronNum[-1], True)

    fc1_dropout = tf.nn.dropout(fc1, 0.5)

    # Reshape flat per-frame predictions back to (batch, time, outputNum).
    prediction = tf.reshape(
        linearLayer('Linear_1', fc1_dropout, outputNum, False),
        [-1, sequenceLength_max, outputNum])

    return prediction
# Example 4
def blstmSequencePredictionNetwork(samples,
                                   seqenceLengths,
                                   sequenceLength_max,
                                   outputNum,
                                   layerNum=3,
                                   neuronNum=None):
    """Stacked BLSTM producing one prediction per sequence.

    Like blstmFramePredictionNetwork, but only the BLSTM output at each
    example's last valid timestep is fed into the classifier head.

    Args:
        samples: Input tensor; assumed shape (batch, time, features) —
            TODO confirm with callers.
        seqenceLengths: Per-example valid lengths; also used to pick the
            last valid timestep. (Spelling kept for interface compatibility.)
        sequenceLength_max: Static max sequence length, used to compute
            flat row indices into the (batch*time, features) tensor.
        outputNum: Size of the per-sequence output.
        layerNum: Number of BLSTM layers.
        neuronNum: List of hidden sizes per layer; defaults to [512].
            (A None sentinel avoids the shared mutable-default pitfall.)

    Returns:
        Tensor of shape (batch, outputNum).
    """
    if neuronNum is None:
        neuronNum = [512]

    time_dim = 1
    batch_dim = 0

    # Reverse so the backward LSTM sees the sequence in its natural order.
    samples_bw = tf.reverse_sequence(samples, seqenceLengths, time_dim,
                                     batch_dim, 'reverse_bottom')
    output_fw, output_bw, _, _ = bidirectionalLstmNetwork(
        'BidirectionalLstm', samples, samples_bw, layerNum, neuronNum,
        seqenceLengths)
    # Undo the reversal so forward/backward outputs are time-aligned.
    output_bw_reversed = tf.reverse_sequence(output_bw, seqenceLengths,
                                             time_dim, batch_dim,
                                             'reverse_blstm_out')

    # Concatenate both directions along the feature axis.
    biLstmOut1 = tf.concat([output_fw, output_bw_reversed], 2)

    batch_size = tf.cast(tf.shape(biLstmOut1)[0], dtype=tf.int64)
    # Flat row index of each example's last valid timestep in the
    # (batch * time, features) view: row = b * max_len + (len_b - 1).
    index = tf.range(0, batch_size) * sequenceLength_max + (seqenceLengths - 1)
    # Gather the last-valid-timestep features for every example.
    outputs = tf.gather(tf.reshape(biLstmOut1, [-1, 2 * neuronNum[-1]]), index)

    fc1 = linearLayer('FC_1', outputs, neuronNum[-1], True)

    fc1_dropout = tf.nn.dropout(fc1, 0.5)

    prediction = linearLayer('Linear_1', fc1_dropout, outputNum, False)

    return prediction
# Example 5
def oneLayerBidirectionalLstmEncoder(bottom, sequenceLengths, hiddenDims_1,
                                     hiddenDims_z):
    """Encode a sequence into per-timestep VAE posterior parameters.

    Runs a single BLSTM layer over ``bottom``, concatenates both directions,
    then maps each timestep through FC_1 and two parallel linear heads to
    produce the mean and log-variance of the latent distribution.

    Args:
        bottom: Input tensor; assumed shape (batch, time, features) — the
            static time dimension must be known (TODO confirm with callers).
        sequenceLengths: Per-example valid lengths used by reverse_sequence
            and the BLSTM.
        hiddenDims_1: Hidden size of the (single) BLSTM layer and of FC_1.
        hiddenDims_z: Latent dimensionality.

    Returns:
        Tuple (mean, SE_ln), each of shape (batch, maxSequLen, hiddenDims_z);
        SE_ln is presumably the log of the variance — verify against callers.
    """
    time_dim = 1
    batch_dim = 0

    # Static max sequence length, needed to reshape back after flattening.
    maxSequLen = bottom.shape[1].value

    # Reverse so the backward LSTM sees the sequence in its natural order.
    samples_bw = tf.reverse_sequence(bottom, sequenceLengths, time_dim,
                                     batch_dim, 'vae_encoder_reverse_bottom')
    output_fw, output_bw, _, _ = bidirectionalLstmNetwork(
        'vae_encoder_BidirectionalLstm', bottom, samples_bw, 1, [hiddenDims_1],
        sequenceLengths)

    # Undo the reversal so forward/backward outputs are time-aligned.
    output_bw_reversed = tf.reverse_sequence(output_bw, sequenceLengths,
                                             time_dim, batch_dim,
                                             'vae_encoder_reverse_blstm_out')
    # Concatenate both directions, then flatten (batch, time) for the FCs.
    biLstmOut1 = tf.concat([output_fw, output_bw_reversed], 2)
    biLstmOut = tf.reshape(biLstmOut1, [-1, 2 * hiddenDims_1])

    linear1 = linearLayer('vae_encoder_FC_1', biLstmOut, hiddenDims_1, True)

    # Two parallel heads on the same features: latent mean and log-variance.
    mean = tf.reshape(linearLayer("vae_encoder_mu", linear1, hiddenDims_z,
                                  False),
                      shape=[-1, maxSequLen, hiddenDims_z])
    SE_ln = tf.reshape(linearLayer("vae_encoder_SE_log", linear1, hiddenDims_z,
                                   False),
                       shape=[-1, maxSequLen, hiddenDims_z])

    return mean, SE_ln