Example #1
import theano.tensor as T

# ConvPoolLayer and HiddenLayer are assumed to be the project's own layer classes
# (they are not defined in this snippet).

def encoder(x, params, config):
    # Encode a batch of length-4000 1-D inputs into a num_hidden-dimensional vector.
    mb_size = config['mb_size']
    num_hidden = config['num_hidden']

    # Three strided 1-D convolutions shrink the time axis: 4000 -> 399 -> 38 -> 6 steps.
    c_1 = ConvPoolLayer(in_channels = 1, out_channels = 128, in_length = 4000, batch_size = mb_size, kernel_len = 20, stride = 10, activation = "relu", batch_norm = True, W = params['Wc_enc_1'], b = params['bc_enc_1'])

    c_2 = ConvPoolLayer(in_channels = 128, out_channels = 256, in_length = 399, batch_size = mb_size, kernel_len = 20, stride = 10, activation = "relu", batch_norm = True, W = params['Wc_enc_2'], b = params['bc_enc_2'])

    c_3 = ConvPoolLayer(in_channels = 256, out_channels = 512, in_length = 38, batch_size = mb_size, kernel_len = 10, stride = 5, activation = "relu", batch_norm = True, W = params['Wc_enc_3'], b = params['bc_enc_3'])

    # Two fully connected layers on the flattened conv features (512 channels x 6 steps).
    h_out_1 = HiddenLayer(num_in = 512 * 6, num_out = num_hidden, W = params['W_enc_1'], b = params['b_enc_1'], activation = 'relu', batch_norm = True)

    h_out_2 = HiddenLayer(num_in = num_hidden, num_out = num_hidden, W = params['W_enc_2'], b = params['b_enc_2'], activation = 'relu', batch_norm = True)

    print("x ndim", x.ndim)  # debug print

    # Reshape the input to (batch, channels, length) and pin the conv output shape.
    c_1_value = T.specify_shape(c_1.output(x.reshape((mb_size, 1, 4000))), (mb_size, 128, 399))
    c_2_value = c_2.output(c_1_value)
    c_3_value = c_3.output(c_2_value)

    h_out_1_value = h_out_1.output(c_3_value.flatten(2))
    h_out_2_value = h_out_2.output(h_out_1_value)

    return {'h' : h_out_2_value}
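
A minimal sketch of how encoder() might be wired into a Theano graph is shown below. It assumes params already maps the key names used above ('Wc_enc_1', 'bc_enc_1', ..., 'W_enc_2', 'b_enc_2') to shared weight variables created by the project's own initialization code, and the num_hidden value is only illustrative; the exact weight shapes depend on ConvPoolLayer and HiddenLayer, which are not shown here.

import numpy as np
import theano
import theano.tensor as T

config = {'mb_size': 128, 'num_hidden': 1024}    # num_hidden chosen for illustration

x = T.matrix('x')                                # symbolic (mb_size, 4000) input batch
code = encoder(x, params, config)['h']           # symbolic num_hidden-dim encoding

encode_fn = theano.function([x], code)
h = encode_fn(np.random.randn(128, 4000).astype(theano.config.floatX))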
Example #2
import theano
import theano.tensor as T

# ConvPoolLayer, HiddenLayer and dropout are assumed to come from the project's own
# layer utilities (they are not defined in this snippet).

def discriminator(x, z, params, mb_size, num_hidden, num_latent):
    # Score (input, latent code) pairs and return a sigmoid probability per example.

    srng = theano.tensor.shared_randomstreams.RandomStreams(420)  # seeded stream (not used directly below)

    # Convolutional feature extractor over the raw input: 4000 -> 399 -> 38 -> 6 steps.
    c_1 = ConvPoolLayer(in_channels = 1, out_channels = 128, in_length = 4000, batch_size = mb_size, kernel_len = 20, stride = 10, activation = "relu", batch_norm = False, W = params['W_c_1'], b = params['b_c_1'])

    c_2 = ConvPoolLayer(in_channels = 128, out_channels = 256, in_length = 399, batch_size = mb_size, kernel_len = 20, stride = 10, activation = "relu", batch_norm = False, W = params['W_c_2'], b = params['b_c_2'])

    c_3 = ConvPoolLayer(in_channels = 256, out_channels = 512, in_length = 38, batch_size = mb_size, kernel_len = 10, stride = 5, activation = "relu", batch_norm = False, W = params['W_c_3'], b = params['b_c_3'])

    c_h_1 = HiddenLayer(num_in = 6 * 512, num_out = num_hidden, W = params['W_ch_1'], b = params['b_ch_1'], activation = 'relu', batch_norm = False)

    # Fully connected stack over the concatenated (latent, conv-feature) vector.
    h_out_1 = HiddenLayer(num_in = num_hidden + num_latent, num_out = num_hidden, activation = 'relu', batch_norm = False, W = params['W_disc_1'], b = params['b_disc_1'])

    h_out_2 = HiddenLayer(num_in = num_hidden, num_out = num_hidden, activation = 'relu', batch_norm = False, W = params['W_disc_2'], b = params['b_disc_2'])

    h_out_3 = HiddenLayer(num_in = num_hidden, num_out = num_hidden, activation = 'relu', batch_norm = False, W = params['W_disc_3'], b = params['b_disc_3'])

    h_out_4 = HiddenLayer(num_in = num_hidden, num_out = 1, activation = None, batch_norm = False, W = params['W_disc_4'], b = params['b_disc_4'])

    # Apply dropout to the raw input, reshape to (batch, channels, length), run the conv stack,
    # and pin the output shape of each stage.
    c_1_value = T.specify_shape(c_1.output(dropout(x, 0.8).reshape((mb_size, 1, 4000))), (mb_size, 128, 399))

    c_2_value = T.specify_shape(c_2.output(c_1_value), (mb_size, 256, 38))

    c_3_value = T.specify_shape(c_3.output(c_2_value), (mb_size, 512, 6))

    c_h_1_value = c_h_1.output(c_3_value.flatten(2))

    # Join the latent code z with the conv features, then pass through the MLP with dropout.
    h_out_1_value = dropout(h_out_1.output(T.concatenate([z, c_h_1_value], axis = 1)))  # dropout's default rate

    h_out_2_value = dropout(h_out_2.output(h_out_1_value), 0.2)

    h_out_3_value = dropout(h_out_3.output(h_out_2_value), 0.2)

    h_out_4_value = h_out_4.output(h_out_3_value)

    raw_y = h_out_4_value

    # Squash the single logit to a probability.
    classification = T.nnet.sigmoid(raw_y)

    results = {'c' : classification}

    return results
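
The dropout() helper used in the discriminator above is not part of this snippet. Below is a minimal sketch of one possible implementation, assuming the second argument is the drop probability and that no separate train/test switch is needed; the project's real helper may instead take a keep probability (which would better fit the call dropout(x, 0.8) on the raw input) or behave differently.

import theano
from theano.tensor.shared_randomstreams import RandomStreams

_srng = RandomStreams(seed=12345)

def dropout(layer_input, p_drop=0.5):
    # Zero each unit independently with probability p_drop and rescale the survivors
    # so the expected activation stays the same (inverted dropout).
    retain = 1.0 - p_drop
    mask = _srng.binomial(n=1, p=retain, size=layer_input.shape, dtype=theano.config.floatX)
    return layer_input * mask / retain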